diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt index 4ceef53164b0..d1ad9d5cae46 100644 --- a/Documentation/virtual/kvm/devices/s390_flic.txt +++ b/Documentation/virtual/kvm/devices/s390_flic.txt @@ -27,6 +27,9 @@ Groups: Copies all floating interrupts into a buffer provided by userspace. When the buffer is too small it returns -ENOMEM, which is the indication for userspace to try again with a bigger buffer. + -ENOBUFS is returned when the allocation of a kernelspace buffer has + failed. + -EFAULT is returned when copying data to userspace failed. All interrupts remain pending, i.e. are not deleted from the list of currently pending interrupts. attr->addr contains the userspace address of the buffer into which all diff --git a/Makefile b/Makefile index 65c7c8756803..69952c1404b2 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 19 -SUBLEVEL = 6 +SUBLEVEL = 7 EXTRAVERSION = NAME = Diseased Newt diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts index fec1fca2ad66..6c4bc53cbf4e 100644 --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts @@ -167,7 +167,13 @@ macb1: ethernet@f802c000 { phy-mode = "rmii"; + #address-cells = <1>; + #size-cells = <0>; status = "okay"; + + ethernet-phy@1 { + reg = <0x1>; + }; }; dbgu: serial@ffffee00 { diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi index a5441d5482a6..3cc8b8320345 100644 --- a/arch/arm/boot/dts/dove.dtsi +++ b/arch/arm/boot/dts/dove.dtsi @@ -154,7 +154,7 @@ uart2: serial@12200 { compatible = "ns16550a"; - reg = <0x12000 0x100>; + reg = <0x12200 0x100>; reg-shift = <2>; interrupts = <9>; clocks = <&core_clk 0>; @@ -163,7 +163,7 @@ uart3: serial@12300 { compatible = "ns16550a"; - reg = <0x12100 0x100>; + reg = <0x12300 0x100>; reg-shift = <2>; interrupts = <10>; clocks = <&core_clk 0>; diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts index f02775487cd4..c41600e587e0 100644 --- a/arch/arm/boot/dts/exynos5250-spring.dts +++ b/arch/arm/boot/dts/exynos5250-spring.dts @@ -429,7 +429,6 @@ &mmc_0 { status = "okay"; num-slots = <1>; - supports-highspeed; broken-cd; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; @@ -437,11 +436,8 @@ samsung,dw-mshc-ddr-timing = <1 2>; pinctrl-names = "default"; pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_cd &sd0_bus4 &sd0_bus8>; - - slot@0 { - reg = <0>; - bus-width = <8>; - }; + bus-width = <8>; + cap-mmc-highspeed; }; /* @@ -451,7 +447,6 @@ &mmc_1 { status = "okay"; num-slots = <1>; - supports-highspeed; broken-cd; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; @@ -459,11 +454,8 @@ samsung,dw-mshc-ddr-timing = <1 2>; pinctrl-names = "default"; pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_cd &sd1_bus4>; - - slot@0 { - reg = <0>; - bus-width = <4>; - }; + bus-width = <4>; + cap-sd-highspeed; }; &pinctrl_0 { diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index afb9cafd3786..674d03f4ba15 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -115,7 +115,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. 
*/ -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) /* When the program starts, a1 contains a pointer to a function to be registered with atexit, as per the SVR4 ABI. A value of 0 means we diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 1bca8f8af442..5de64c0e477d 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -128,29 +128,28 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd) (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) +#define kvm_pgd_index(addr) pgd_index(addr) + static inline bool kvm_page_empty(void *ptr) { struct page *ptr_page = virt_to_page(ptr); return page_count(ptr_page) == 1; } - #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) #define kvm_pud_table_empty(kvm, pudp) (0) #define KVM_PREALLOC_LEVEL 0 -static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) +static inline void *kvm_get_hwpgd(struct kvm *kvm) { - return 0; + return kvm->arch.pgd; } -static inline void kvm_free_hwpgd(struct kvm *kvm) { } - -static inline void *kvm_get_hwpgd(struct kvm *kvm) +static inline unsigned int kvm_get_hwpgd_size(void) { - return kvm->arch.pgd; + return PTRS_PER_S2_PGD * sizeof(pgd_t); } struct kvm; @@ -186,7 +185,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached; - VM_BUG_ON(size & PAGE_MASK); + VM_BUG_ON(size & ~PAGE_MASK); if (!need_flush && !icache_is_pipt()) goto vipt_cache; diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index 09ee408c1a67..b404cf886029 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h @@ -193,8 +193,14 @@ struct kvm_arch_memory_slot { #define KVM_ARM_IRQ_CPU_IRQ 0 #define KVM_ARM_IRQ_CPU_FIQ 1 -/* Highest supported SPI, from VGIC_NR_IRQS */ +/* + * This used to hold the highest supported SPI, but it is now obsolete + * and only here to provide source code level compatibility with older + * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS. 
+ */ +#ifndef __KERNEL__ #define KVM_ARM_IRQ_GIC_MAX 127 +#endif /* PSCI interface */ #define KVM_PSCI_FN_BASE 0x95c1ba5e diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c index c4cc50e58c13..cfb354ff2a60 100644 --- a/arch/arm/kernel/hibernate.c +++ b/arch/arm/kernel/hibernate.c @@ -22,6 +22,7 @@ #include #include #include +#include "reboot.h" int pfn_is_nosave(unsigned long pfn) { @@ -61,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused) ret = swsusp_save(); if (ret == 0) - soft_restart(virt_to_phys(cpu_resume)); + _soft_restart(virt_to_phys(cpu_resume), false); return ret; } @@ -86,7 +87,7 @@ static void notrace arch_restore_image(void *unused) for (pbe = restore_pblist; pbe; pbe = pbe->next) copy_page(pbe->orig_address, pbe->address); - soft_restart(virt_to_phys(cpu_resume)); + _soft_restart(virt_to_phys(cpu_resume), false); } static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata; diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index fdfa3a78ec8c..2bf1a162defb 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -41,6 +41,7 @@ #include #include #include +#include "reboot.h" #ifdef CONFIG_CC_STACKPROTECTOR #include @@ -95,7 +96,7 @@ static void __soft_restart(void *addr) BUG(); } -void soft_restart(unsigned long addr) +void _soft_restart(unsigned long addr, bool disable_l2) { u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack); @@ -104,7 +105,7 @@ void soft_restart(unsigned long addr) local_fiq_disable(); /* Disable the L2 if we're the last man standing. */ - if (num_online_cpus() == 1) + if (disable_l2) outer_disable(); /* Change to the new stack and continue with the reset. */ @@ -114,6 +115,11 @@ void soft_restart(unsigned long addr) BUG(); } +void soft_restart(unsigned long addr) +{ + _soft_restart(addr, num_online_cpus() == 1); +} + /* * Function pointers to optional machine specific functions */ diff --git a/arch/arm/kernel/reboot.h b/arch/arm/kernel/reboot.h new file mode 100644 index 000000000000..c87f05816d6b --- /dev/null +++ b/arch/arm/kernel/reboot.h @@ -0,0 +1,6 @@ +#ifndef REBOOT_H +#define REBOOT_H + +extern void _soft_restart(unsigned long addr, bool disable_l2); + +#endif diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 0b0d58a905c4..3ec96878e1c3 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -644,8 +644,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, if (!irqchip_in_kernel(kvm)) return -ENXIO; - if (irq_num < VGIC_NR_PRIVATE_IRQS || - irq_num > KVM_ARM_IRQ_GIC_MAX) + if (irq_num < VGIC_NR_PRIVATE_IRQS) return -EINVAL; return kvm_vgic_inject_irq(kvm, 0, irq_num, level); diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 136662547ca6..9ec6dfee6a9c 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -251,7 +251,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, phys_addr_t addr = start, end = start + size; phys_addr_t next; - pgd = pgdp + pgd_index(addr); + pgd = pgdp + kvm_pgd_index(addr); do { next = kvm_pgd_addr_end(addr, end); if (!pgd_none(*pgd)) @@ -316,7 +316,7 @@ static void stage2_flush_memslot(struct kvm *kvm, phys_addr_t next; pgd_t *pgd; - pgd = kvm->arch.pgd + pgd_index(addr); + pgd = kvm->arch.pgd + kvm_pgd_index(addr); do { next = kvm_pgd_addr_end(addr, end); stage2_flush_puds(kvm, pgd, addr, next); @@ -593,6 +593,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); } +/* Free the HW pgd, one page at a time */ 
+static void kvm_free_hwpgd(void *hwpgd) +{ + free_pages_exact(hwpgd, kvm_get_hwpgd_size()); +} + +/* Allocate the HW PGD, making sure that each page gets its own refcount */ +static void *kvm_alloc_hwpgd(void) +{ + unsigned int size = kvm_get_hwpgd_size(); + + return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); +} + /** * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. * @kvm: The KVM struct pointer for the VM. @@ -606,15 +620,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) */ int kvm_alloc_stage2_pgd(struct kvm *kvm) { - int ret; pgd_t *pgd; + void *hwpgd; if (kvm->arch.pgd != NULL) { kvm_err("kvm_arch already initialized?\n"); return -EINVAL; } + hwpgd = kvm_alloc_hwpgd(); + if (!hwpgd) + return -ENOMEM; + + /* When the kernel uses more levels of page tables than the + * guest, we allocate a fake PGD and pre-populate it to point + * to the next-level page table, which will be the real + * initial page table pointed to by the VTTBR. + * + * When KVM_PREALLOC_LEVEL==2, we allocate a single page for + * the PMD and the kernel will use folded pud. + * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD + * pages. + */ if (KVM_PREALLOC_LEVEL > 0) { + int i; + /* * Allocate fake pgd for the page table manipulation macros to * work. This is not used by the hardware and we have no @@ -622,30 +652,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) */ pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t), GFP_KERNEL | __GFP_ZERO); + + if (!pgd) { + kvm_free_hwpgd(hwpgd); + return -ENOMEM; + } + + /* Plug the HW PGD into the fake one. */ + for (i = 0; i < PTRS_PER_S2_PGD; i++) { + if (KVM_PREALLOC_LEVEL == 1) + pgd_populate(NULL, pgd + i, + (pud_t *)hwpgd + i * PTRS_PER_PUD); + else if (KVM_PREALLOC_LEVEL == 2) + pud_populate(NULL, pud_offset(pgd, 0) + i, + (pmd_t *)hwpgd + i * PTRS_PER_PMD); + } } else { /* * Allocate actual first-level Stage-2 page table used by the * hardware for Stage-2 page table walks. 
*/ - pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER); + pgd = (pgd_t *)hwpgd; } - if (!pgd) - return -ENOMEM; - - ret = kvm_prealloc_hwpgd(kvm, pgd); - if (ret) - goto out_err; - kvm_clean_pgd(pgd); kvm->arch.pgd = pgd; return 0; -out_err: - if (KVM_PREALLOC_LEVEL > 0) - kfree(pgd); - else - free_pages((unsigned long)pgd, S2_PGD_ORDER); - return ret; } /** @@ -746,11 +778,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm) return; unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); - kvm_free_hwpgd(kvm); + kvm_free_hwpgd(kvm_get_hwpgd(kvm)); if (KVM_PREALLOC_LEVEL > 0) kfree(kvm->arch.pgd); - else - free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); + kvm->arch.pgd = NULL; } @@ -760,7 +791,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache pgd_t *pgd; pud_t *pud; - pgd = kvm->arch.pgd + pgd_index(addr); + pgd = kvm->arch.pgd + kvm_pgd_index(addr); if (WARN_ON(pgd_none(*pgd))) { if (!cache) return NULL; diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c index d8ab605a44fa..e8ba3c692527 100644 --- a/arch/arm/mach-mvebu/pmsu.c +++ b/arch/arm/mach-mvebu/pmsu.c @@ -415,6 +415,9 @@ static __init int armada_38x_cpuidle_init(void) void __iomem *mpsoc_base; u32 reg; + pr_warn("CPU idle is currently broken on Armada 38x: disabling"); + return 0; + np = of_find_compatible_node(NULL, NULL, "marvell,armada-380-coherency-fabric"); if (!np) @@ -476,6 +479,16 @@ static int __init mvebu_v7_cpu_pm_init(void) return 0; of_node_put(np); + /* + * Currently the CPU idle support for Armada 38x is broken, as + * the CPU hotplug uses some of the CPU idle functions it is + * broken too, so let's disable it + */ + if (of_machine_is_compatible("marvell,armada380")) { + cpu_hotplug_disable(); + pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling"); + } + if (of_machine_is_compatible("marvell,armadaxp")) ret = armada_xp_cpuidle_init(); else if (of_machine_is_compatible("marvell,armada370")) @@ -489,7 +502,8 @@ static int __init mvebu_v7_cpu_pm_init(void) return ret; mvebu_v7_pmsu_enable_l2_powerdown_onidle(); - platform_device_register(&mvebu_v7_cpuidle_device); + if (mvebu_v7_cpuidle_device.name) + platform_device_register(&mvebu_v7_cpuidle_device); cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier); return 0; diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h index 7bc66682687e..dcbe17f5e5f8 100644 --- a/arch/arm/mach-s3c64xx/crag6410.h +++ b/arch/arm/mach-s3c64xx/crag6410.h @@ -14,6 +14,7 @@ #include #define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START +#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64) #define PCA935X_GPIO_BASE GPIO_BOARD_START #define CODEC_GPIO_BASE (GPIO_BOARD_START + 8) diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c index 10b913baab28..65c426bc45f7 100644 --- a/arch/arm/mach-s3c64xx/mach-crag6410.c +++ b/arch/arm/mach-s3c64xx/mach-crag6410.c @@ -554,6 +554,7 @@ static struct wm831x_touch_pdata touch_pdata = { static struct wm831x_pdata crag_pmic_pdata = { .wm831x_num = 1, + .irq_base = BANFF_PMIC_IRQ_BASE, .gpio_base = BANFF_PMIC_GPIO_BASE, .soft_shutdown = true, diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b1f9a20a3677..840c232dd456 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -306,6 +306,27 @@ config ARM64_ERRATUM_832075 If unsure, say Y. 
+config ARM64_ERRATUM_845719 + bool "Cortex-A53: 845719: a load might read incorrect data" + depends on COMPAT + default y + help + This option adds an alternative code sequence to work around ARM + erratum 845719 on Cortex-A53 parts up to r0p4. + + When running a compat (AArch32) userspace on an affected Cortex-A53 + part, a load at EL0 from a virtual address that matches the bottom 32 + bits of the virtual address used by a recent load at (AArch64) EL1 + might return incorrect data. + + The workaround is to write the contextidr_el1 register on exception + return to a 32-bit task. + Please note that this does not necessarily enable the workaround, + as it depends on the alternative framework, which will only patch + the kernel if an affected CPU is detected. + + If unsure, say Y. + endmenu @@ -416,6 +437,10 @@ config HOTPLUG_CPU source kernel/Kconfig.preempt +config UP_LATE_INIT + def_bool y + depends on !SMP + config HZ int default 100 diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 07547ccc1f2b..5fe4befda1a5 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -23,8 +23,9 @@ #define ARM64_WORKAROUND_CLEAN_CACHE 0 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1 +#define ARM64_WORKAROUND_845719 2 -#define ARM64_NCAPS 2 +#define ARM64_NCAPS 3 #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index adcf49547301..df150ae862f2 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -137,6 +137,8 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd) #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) +#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)) + /* * If we are concatenating first level stage-2 page tables, we would have less * than or equal to 16 pointers in the fake PGD, because that's what the @@ -150,43 +152,6 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd) #define KVM_PREALLOC_LEVEL (0) #endif -/** - * kvm_prealloc_hwpgd - allocate inital table for VTTBR - * @kvm: The KVM struct pointer for the VM. - * @pgd: The kernel pseudo pgd - * - * When the kernel uses more levels of page tables than the guest, we allocate - * a fake PGD and pre-populate it to point to the next-level page table, which - * will be the real initial page table pointed to by the VTTBR. - * - * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and - * the kernel will use folded pud. When KVM_PREALLOC_LEVEL==1, we - * allocate 2 consecutive PUD pages. 
- */ -static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) -{ - unsigned int i; - unsigned long hwpgd; - - if (KVM_PREALLOC_LEVEL == 0) - return 0; - - hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT); - if (!hwpgd) - return -ENOMEM; - - for (i = 0; i < PTRS_PER_S2_PGD; i++) { - if (KVM_PREALLOC_LEVEL == 1) - pgd_populate(NULL, pgd + i, - (pud_t *)hwpgd + i * PTRS_PER_PUD); - else if (KVM_PREALLOC_LEVEL == 2) - pud_populate(NULL, pud_offset(pgd, 0) + i, - (pmd_t *)hwpgd + i * PTRS_PER_PMD); - } - - return 0; -} - static inline void *kvm_get_hwpgd(struct kvm *kvm) { pgd_t *pgd = kvm->arch.pgd; @@ -203,12 +168,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm) return pmd_offset(pud, 0); } -static inline void kvm_free_hwpgd(struct kvm *kvm) +static inline unsigned int kvm_get_hwpgd_size(void) { - if (KVM_PREALLOC_LEVEL > 0) { - unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm); - free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT); - } + if (KVM_PREALLOC_LEVEL > 0) + return PTRS_PER_S2_PGD * PAGE_SIZE; + return PTRS_PER_S2_PGD * sizeof(pgd_t); } static inline bool kvm_page_empty(void *ptr) diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h index 59e282311b58..8dcd61e32176 100644 --- a/arch/arm64/include/asm/smp_plat.h +++ b/arch/arm64/include/asm/smp_plat.h @@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void) extern u64 __cpu_logical_map[NR_CPUS]; #define cpu_logical_map(cpu) __cpu_logical_map[cpu] +void __init do_post_cpus_up_work(void); + #endif /* __ASM_SMP_PLAT_H */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 8e38878c87c6..d9e9822efcee 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -179,8 +179,14 @@ struct kvm_arch_memory_slot { #define KVM_ARM_IRQ_CPU_IRQ 0 #define KVM_ARM_IRQ_CPU_FIQ 1 -/* Highest supported SPI, from VGIC_NR_IRQS */ +/* + * This used to hold the highest supported SPI, but it is now obsolete + * and only here to provide source code level compatibility with older + * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS. 
+ */ +#ifndef __KERNEL__ #define KVM_ARM_IRQ_GIC_MAX 127 +#endif /* PSCI interface */ #define KVM_PSCI_FN_BASE 0x95c1ba5e diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index fa62637e63a8..ad6d52392bb1 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -88,7 +88,16 @@ struct arm64_cpu_capabilities arm64_errata[] = { /* Cortex-A57 r0p0 - r1p2 */ .desc = "ARM erratum 832075", .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, - MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12), + MIDR_RANGE(MIDR_CORTEX_A57, 0x00, + (1 << MIDR_VARIANT_SHIFT) | 2), + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_845719 + { + /* Cortex-A53 r0p[01234] */ + .desc = "ARM erratum 845719", + .capability = ARM64_WORKAROUND_845719, + MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04), }, #endif { diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index fd4fa374e5d2..9b870a2815e6 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -21,8 +21,10 @@ #include #include +#include #include #include +#include #include #include #include @@ -120,6 +122,24 @@ ct_user_enter ldr x23, [sp, #S_SP] // load return stack pointer msr sp_el0, x23 + +#ifdef CONFIG_ARM64_ERRATUM_845719 + alternative_insn \ + "nop", \ + "tbz x22, #4, 1f", \ + ARM64_WORKAROUND_845719 +#ifdef CONFIG_PID_IN_CONTEXTIDR + alternative_insn \ + "nop; nop", \ + "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \ + ARM64_WORKAROUND_845719 +#else + alternative_insn \ + "nop", \ + "msr contextidr_el1, xzr; 1:", \ + ARM64_WORKAROUND_845719 +#endif +#endif .endif msr elr_el1, x21 // set up the return data msr spsr_el1, x22 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 8ce88e08c030..98af7da1b8b5 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -426,6 +426,7 @@ __create_page_tables: */ mov x0, x25 add x1, x26, #SWAPPER_DIR_SIZE + dmb sy bl __inval_cache_range mov lr, x27 diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 20fe2932ad0c..79fedd8dbdd7 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -206,6 +206,18 @@ static void __init smp_build_mpidr_hash(void) } #endif +void __init do_post_cpus_up_work(void) +{ + apply_alternatives_all(); +} + +#ifdef CONFIG_UP_LATE_INIT +void __init up_late_init(void) +{ + do_post_cpus_up_work(); +} +#endif /* CONFIG_UP_LATE_INIT */ + static void __init setup_processor(void) { struct cpu_info *cpu_info; diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 7ae6ee085261..e1b857fa2a1d 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -310,7 +310,7 @@ void cpu_die(void) void __init smp_cpus_done(unsigned int max_cpus) { pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); - apply_alternatives_all(); + do_post_cpus_up_work(); } void __init smp_prepare_boot_cpu(void) diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c index 356ee84cad95..04845aaf5985 100644 --- a/arch/c6x/kernel/time.c +++ b/arch/c6x/kernel/time.c @@ -49,7 +49,7 @@ u64 sched_clock(void) return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT; } -void time_init(void) +void __init time_init(void) { u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT; diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h index e41c56e375b1..1e38f0e1ea3e 100644 --- a/arch/mips/include/asm/asm-eva.h +++ b/arch/mips/include/asm/asm-eva.h @@ -11,6 +11,36 @@ #define __ASM_ASM_EVA_H #ifndef __ASSEMBLY__ + +/* Kernel variants */ + +#define 
kernel_cache(op, base) "cache " op ", " base "\n" +#define kernel_ll(reg, addr) "ll " reg ", " addr "\n" +#define kernel_sc(reg, addr) "sc " reg ", " addr "\n" +#define kernel_lw(reg, addr) "lw " reg ", " addr "\n" +#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n" +#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n" +#define kernel_lh(reg, addr) "lh " reg ", " addr "\n" +#define kernel_lb(reg, addr) "lb " reg ", " addr "\n" +#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n" +#define kernel_sw(reg, addr) "sw " reg ", " addr "\n" +#define kernel_swl(reg, addr) "swl " reg ", " addr "\n" +#define kernel_swr(reg, addr) "swr " reg ", " addr "\n" +#define kernel_sh(reg, addr) "sh " reg ", " addr "\n" +#define kernel_sb(reg, addr) "sb " reg ", " addr "\n" + +#ifdef CONFIG_32BIT +/* + * No 'sd' or 'ld' instructions in 32-bit but the code will + * do the correct thing + */ +#define kernel_sd(reg, addr) user_sw(reg, addr) +#define kernel_ld(reg, addr) user_lw(reg, addr) +#else +#define kernel_sd(reg, addr) "sd " reg", " addr "\n" +#define kernel_ld(reg, addr) "ld " reg", " addr "\n" +#endif /* CONFIG_32BIT */ + #ifdef CONFIG_EVA #define __BUILD_EVA_INSN(insn, reg, addr) \ @@ -41,37 +71,60 @@ #else -#define user_cache(op, base) "cache " op ", " base "\n" -#define user_ll(reg, addr) "ll " reg ", " addr "\n" -#define user_sc(reg, addr) "sc " reg ", " addr "\n" -#define user_lw(reg, addr) "lw " reg ", " addr "\n" -#define user_lwl(reg, addr) "lwl " reg ", " addr "\n" -#define user_lwr(reg, addr) "lwr " reg ", " addr "\n" -#define user_lh(reg, addr) "lh " reg ", " addr "\n" -#define user_lb(reg, addr) "lb " reg ", " addr "\n" -#define user_lbu(reg, addr) "lbu " reg ", " addr "\n" -#define user_sw(reg, addr) "sw " reg ", " addr "\n" -#define user_swl(reg, addr) "swl " reg ", " addr "\n" -#define user_swr(reg, addr) "swr " reg ", " addr "\n" -#define user_sh(reg, addr) "sh " reg ", " addr "\n" -#define user_sb(reg, addr) "sb " reg ", " addr "\n" +#define user_cache(op, base) kernel_cache(op, base) +#define user_ll(reg, addr) kernel_ll(reg, addr) +#define user_sc(reg, addr) kernel_sc(reg, addr) +#define user_lw(reg, addr) kernel_lw(reg, addr) +#define user_lwl(reg, addr) kernel_lwl(reg, addr) +#define user_lwr(reg, addr) kernel_lwr(reg, addr) +#define user_lh(reg, addr) kernel_lh(reg, addr) +#define user_lb(reg, addr) kernel_lb(reg, addr) +#define user_lbu(reg, addr) kernel_lbu(reg, addr) +#define user_sw(reg, addr) kernel_sw(reg, addr) +#define user_swl(reg, addr) kernel_swl(reg, addr) +#define user_swr(reg, addr) kernel_swr(reg, addr) +#define user_sh(reg, addr) kernel_sh(reg, addr) +#define user_sb(reg, addr) kernel_sb(reg, addr) #ifdef CONFIG_32BIT -/* - * No 'sd' or 'ld' instructions in 32-bit but the code will - * do the correct thing - */ -#define user_sd(reg, addr) user_sw(reg, addr) -#define user_ld(reg, addr) user_lw(reg, addr) +#define user_sd(reg, addr) kernel_sw(reg, addr) +#define user_ld(reg, addr) kernel_lw(reg, addr) #else -#define user_sd(reg, addr) "sd " reg", " addr "\n" -#define user_ld(reg, addr) "ld " reg", " addr "\n" +#define user_sd(reg, addr) kernel_sd(reg, addr) +#define user_ld(reg, addr) kernel_ld(reg, addr) #endif /* CONFIG_32BIT */ #endif /* CONFIG_EVA */ #else /* __ASSEMBLY__ */ +#define kernel_cache(op, base) cache op, base +#define kernel_ll(reg, addr) ll reg, addr +#define kernel_sc(reg, addr) sc reg, addr +#define kernel_lw(reg, addr) lw reg, addr +#define kernel_lwl(reg, addr) lwl reg, addr +#define kernel_lwr(reg, addr) lwr reg, addr +#define 
kernel_lh(reg, addr) lh reg, addr +#define kernel_lb(reg, addr) lb reg, addr +#define kernel_lbu(reg, addr) lbu reg, addr +#define kernel_sw(reg, addr) sw reg, addr +#define kernel_swl(reg, addr) swl reg, addr +#define kernel_swr(reg, addr) swr reg, addr +#define kernel_sh(reg, addr) sh reg, addr +#define kernel_sb(reg, addr) sb reg, addr + +#ifdef CONFIG_32BIT +/* + * No 'sd' or 'ld' instructions in 32-bit but the code will + * do the correct thing + */ +#define kernel_sd(reg, addr) user_sw(reg, addr) +#define kernel_ld(reg, addr) user_lw(reg, addr) +#else +#define kernel_sd(reg, addr) sd reg, addr +#define kernel_ld(reg, addr) ld reg, addr +#endif /* CONFIG_32BIT */ + #ifdef CONFIG_EVA #define __BUILD_EVA_INSN(insn, reg, addr) \ @@ -101,31 +154,27 @@ #define user_sd(reg, addr) user_sw(reg, addr) #else -#define user_cache(op, base) cache op, base -#define user_ll(reg, addr) ll reg, addr -#define user_sc(reg, addr) sc reg, addr -#define user_lw(reg, addr) lw reg, addr -#define user_lwl(reg, addr) lwl reg, addr -#define user_lwr(reg, addr) lwr reg, addr -#define user_lh(reg, addr) lh reg, addr -#define user_lb(reg, addr) lb reg, addr -#define user_lbu(reg, addr) lbu reg, addr -#define user_sw(reg, addr) sw reg, addr -#define user_swl(reg, addr) swl reg, addr -#define user_swr(reg, addr) swr reg, addr -#define user_sh(reg, addr) sh reg, addr -#define user_sb(reg, addr) sb reg, addr +#define user_cache(op, base) kernel_cache(op, base) +#define user_ll(reg, addr) kernel_ll(reg, addr) +#define user_sc(reg, addr) kernel_sc(reg, addr) +#define user_lw(reg, addr) kernel_lw(reg, addr) +#define user_lwl(reg, addr) kernel_lwl(reg, addr) +#define user_lwr(reg, addr) kernel_lwr(reg, addr) +#define user_lh(reg, addr) kernel_lh(reg, addr) +#define user_lb(reg, addr) kernel_lb(reg, addr) +#define user_lbu(reg, addr) kernel_lbu(reg, addr) +#define user_sw(reg, addr) kernel_sw(reg, addr) +#define user_swl(reg, addr) kernel_swl(reg, addr) +#define user_swr(reg, addr) kernel_swr(reg, addr) +#define user_sh(reg, addr) kernel_sh(reg, addr) +#define user_sb(reg, addr) kernel_sb(reg, addr) #ifdef CONFIG_32BIT -/* - * No 'sd' or 'ld' instructions in 32-bit but the code will - * do the correct thing - */ -#define user_sd(reg, addr) user_sw(reg, addr) -#define user_ld(reg, addr) user_lw(reg, addr) +#define user_sd(reg, addr) kernel_sw(reg, addr) +#define user_ld(reg, addr) kernel_lw(reg, addr) #else -#define user_sd(reg, addr) sd reg, addr -#define user_ld(reg, addr) ld reg, addr +#define user_sd(reg, addr) kernel_sd(reg, addr) +#define user_ld(reg, addr) kernel_ld(reg, addr) #endif /* CONFIG_32BIT */ #endif /* CONFIG_EVA */ diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index affebb78f5d6..270ecc148443 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -169,6 +169,7 @@ static inline void lose_fpu(int save) } disable_msa(); clear_thread_flag(TIF_USEDMSA); + __disable_fpu(); } else if (is_fpu_owner()) { if (save) _save_fp(current); diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index f2c249796ea8..4e3205a3bee2 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -321,6 +321,7 @@ enum mips_mmu_types { #define T_TRAP 13 /* Trap instruction */ #define T_VCEI 14 /* Virtual coherency exception */ #define T_FPE 15 /* Floating point exception */ +#define T_MSADIS 21 /* MSA disabled exception */ #define T_WATCH 23 /* Watch address reference */ #define T_VCED 31 /* Virtual coherency data */ @@ -577,6 +578,7 
@@ struct kvm_mips_callbacks { int (*handle_syscall)(struct kvm_vcpu *vcpu); int (*handle_res_inst)(struct kvm_vcpu *vcpu); int (*handle_break)(struct kvm_vcpu *vcpu); + int (*handle_msa_disabled)(struct kvm_vcpu *vcpu); int (*vm_init)(struct kvm *kvm); int (*vcpu_init)(struct kvm_vcpu *vcpu); int (*vcpu_setup)(struct kvm_vcpu *vcpu); diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index fb3e8dfd1ff6..838d3a6a5b7d 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -2176,6 +2176,7 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause, case T_SYSCALL: case T_BREAK: case T_RES_INST: + case T_MSADIS: break; case T_COP_UNUSABLE: diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 270bbd41769e..39074fb83bad 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1119,6 +1119,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) ret = kvm_mips_callbacks->handle_break(vcpu); break; + case T_MSADIS: + ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); + break; + default: kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c index fd7257b70e65..4372cc86650c 100644 --- a/arch/mips/kvm/trap_emul.c +++ b/arch/mips/kvm/trap_emul.c @@ -330,6 +330,33 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) return ret; } +static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + /* No MSA supported in guest, guest reserved instruction exception */ + er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); + + switch (er) { + case EMULATE_DONE: + ret = RESUME_GUEST; + break; + + case EMULATE_FAIL: + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + break; + + default: + BUG(); + } + return ret; +} + static int kvm_trap_emul_vm_init(struct kvm *kvm) { return 0; @@ -470,6 +497,7 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { .handle_syscall = kvm_trap_emul_handle_syscall, .handle_res_inst = kvm_trap_emul_handle_res_inst, .handle_break = kvm_trap_emul_handle_break, + .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, .vm_init = kvm_trap_emul_vm_init, .vcpu_init = kvm_trap_emul_vcpu_init, diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c index 21221edda7a9..0f75b6b3d218 100644 --- a/arch/mips/loongson/loongson-3/irq.c +++ b/arch/mips/loongson/loongson-3/irq.c @@ -44,6 +44,7 @@ void mach_irq_dispatch(unsigned int pending) static struct irqaction cascade_irqaction = { .handler = no_action, + .flags = IRQF_NO_SUSPEND, .name = "cascade", }; diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c index 8fddd2cdbff7..efe366d618b1 100644 --- a/arch/mips/mti-malta/malta-memory.c +++ b/arch/mips/mti-malta/malta-memory.c @@ -53,6 +53,12 @@ fw_memblock_t * __init fw_getmdesc(int eva) pr_warn("memsize not set in YAMON, set to default (32Mb)\n"); physical_memsize = 0x02000000; } else { + if (memsize > (256 << 20)) { /* memsize should be capped to 256M */ + pr_warn("Unsupported memsize value (0x%lx) detected! 
" + "Using 0x10000000 (256M) instead\n", + memsize); + memsize = 256 << 20; + } /* If ememsize is set, then set physical_memsize to that */ physical_memsize = ememsize ? : memsize; } diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S index 32a7c828f073..e7567c8a9e79 100644 --- a/arch/mips/power/hibernate.S +++ b/arch/mips/power/hibernate.S @@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend) END(swsusp_arch_suspend) LEAF(swsusp_arch_resume) + /* Avoid TLB mismatch during and after kernel resume */ + jal local_flush_tlb_all PTR_L t0, restore_pblist 0: PTR_L t1, PBE_ADDRESS(t0) /* source */ @@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume) bne t1, t3, 1b PTR_L t0, PBE_NEXT(t0) bnez t0, 0b - jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */ PTR_LA t0, saved_regs PTR_L ra, PT_R31(t0) PTR_L sp, PT_R29(t0) diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index 40198d50b4c2..8005b79ecbcf 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -61,12 +61,22 @@ struct cache_type_info { }; /* These are used to index the cache_type_info array. */ -#define CACHE_TYPE_UNIFIED 0 -#define CACHE_TYPE_INSTRUCTION 1 -#define CACHE_TYPE_DATA 2 +#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */ +#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */ +#define CACHE_TYPE_INSTRUCTION 2 +#define CACHE_TYPE_DATA 3 static const struct cache_type_info cache_type_info[] = { { + /* Embedded systems that use cache-size, cache-block-size, + * etc. for the Unified (typically L2) cache. */ + .name = "Unified", + .size_prop = "cache-size", + .line_size_props = { "cache-line-size", + "cache-block-size", }, + .nr_sets_prop = "cache-sets", + }, + { /* PowerPC Processor binding says the [di]-cache-* * must be equal on unified caches, so just use * d-cache properties. */ @@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache) { struct cache *iter; - if (cache->type == CACHE_TYPE_UNIFIED) + if (cache->type == CACHE_TYPE_UNIFIED || + cache->type == CACHE_TYPE_UNIFIED_D) return cache; list_for_each_entry(iter, &cache_list, list) @@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np) return of_get_property(np, "cache-unified", NULL); } -static struct cache *cache_do_one_devnode_unified(struct device_node *node, - int level) +/* + * Unified caches can have two different sets of tags. Most embedded + * use cache-size, etc. for the unified cache size, but open firmware systems + * use d-cache-size, etc. Check on initialization for which type we have, and + * return the appropriate structure type. Assume it's embedded if it isn't + * open firmware. If it's yet a 3rd type, then there will be missing entries + * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need + * to be extended further. + */ +static int cache_is_unified_d(const struct device_node *np) { - struct cache *cache; + return of_get_property(np, + cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ? 
+ CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED; +} +/* + */ +static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level) +{ pr_debug("creating L%d ucache for %s\n", level, node->full_name); - cache = new_cache(CACHE_TYPE_UNIFIED, level, node); - - return cache; + return new_cache(cache_is_unified_d(node), level, node); } static struct cache *cache_do_one_devnode_split(struct device_node *node, diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c index 2396dda282cd..ead55351b254 100644 --- a/arch/powerpc/perf/callchain.c +++ b/arch/powerpc/perf/callchain.c @@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry, sp = regs->gpr[1]; perf_callchain_store(entry, next_ip); - for (;;) { + while (entry->nr < PERF_MAX_STACK_DEPTH) { fp = (unsigned long __user *) sp; if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp)) return; diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 4c11421847be..3af8324c122e 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void) void iic_setup_cpu(void) { - out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff); + out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff); } u8 iic_get_target_id(int cpu) diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index c7c8720aa39f..63db1b03e756 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages, io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); - for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift) + for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift)) io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask); mb(); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 6a9a255d8058..2578b148ddc4 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1750,7 +1750,8 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose, region.start += phb->ioda.io_segsize; index++; } - } else if (res->flags & IORESOURCE_MEM) { + } else if ((res->flags & IORESOURCE_MEM) && + !pnv_pci_is_mem_pref_64(res->flags)) { region.start = res->start - hose->mem_offset[0] - phb->ioda.m32_pci_base; diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index 1c4c5accd220..d3236c9e226b 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c @@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn) { unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end)); + unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; + unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); /* Always save lowcore pages (LC protection might be enabled). */ if (pfn <= LC_PAGES) @@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn) if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) return 1; /* Skip memory holes and read-only pages (NSS, DCSS, ...). */ + if (pfn >= stext_pfn && pfn <= eshared_pfn) + return ipl_info.type == IPL_TYPE_NSS ? 
1 : 0; if (tprot(PFN_PHYS(pfn))) return 1; return 0; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index f512cffbf84e..07da75c385e6 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include "kvm-s390.h" @@ -1131,7 +1132,6 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, if ((!schid && !cr6) || (schid && cr6)) return NULL; - mutex_lock(&kvm->lock); fi = &kvm->arch.float_int; spin_lock(&fi->lock); inti = NULL; @@ -1159,7 +1159,6 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, if (list_empty(&fi->list)) atomic_set(&fi->active, 0); spin_unlock(&fi->lock); - mutex_unlock(&kvm->lock); return inti; } @@ -1172,7 +1171,6 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) int sigcpu; int rc = 0; - mutex_lock(&kvm->lock); fi = &kvm->arch.float_int; spin_lock(&fi->lock); if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) { @@ -1225,7 +1223,6 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu)); unlock_fi: spin_unlock(&fi->lock); - mutex_unlock(&kvm->lock); return rc; } @@ -1287,10 +1284,10 @@ int kvm_s390_inject_vm(struct kvm *kvm, return rc; } -void kvm_s390_reinject_io_int(struct kvm *kvm, +int kvm_s390_reinject_io_int(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) { - __inject_vm(kvm, inti); + return __inject_vm(kvm, inti); } int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, @@ -1379,7 +1376,6 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm) struct kvm_s390_float_interrupt *fi; struct kvm_s390_interrupt_info *n, *inti = NULL; - mutex_lock(&kvm->lock); fi = &kvm->arch.float_int; spin_lock(&fi->lock); list_for_each_entry_safe(inti, n, &fi->list, list) { @@ -1389,66 +1385,68 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm) fi->irq_count = 0; atomic_set(&fi->active, 0); spin_unlock(&fi->lock); - mutex_unlock(&kvm->lock); } -static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti, - u8 *addr) +static void inti_to_irq(struct kvm_s390_interrupt_info *inti, + struct kvm_s390_irq *irq) { - struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; - struct kvm_s390_irq irq = {0}; - - irq.type = inti->type; + irq->type = inti->type; switch (inti->type) { case KVM_S390_INT_PFAULT_INIT: case KVM_S390_INT_PFAULT_DONE: case KVM_S390_INT_VIRTIO: case KVM_S390_INT_SERVICE: - irq.u.ext = inti->ext; + irq->u.ext = inti->ext; break; case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: - irq.u.io = inti->io; + irq->u.io = inti->io; break; case KVM_S390_MCHK: - irq.u.mchk = inti->mchk; + irq->u.mchk = inti->mchk; break; - default: - return -EINVAL; } - - if (copy_to_user(uptr, &irq, sizeof(irq))) - return -EFAULT; - - return 0; } -static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len) +static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) { struct kvm_s390_interrupt_info *inti; struct kvm_s390_float_interrupt *fi; + struct kvm_s390_irq *buf; + int max_irqs; int ret = 0; int n = 0; - mutex_lock(&kvm->lock); + if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0) + return -EINVAL; + + /* + * We are already using -ENOMEM to signal + * userspace it may retry with a bigger buffer, + * so we need to use something else for this case + */ + buf = vzalloc(len); + if (!buf) + return -ENOBUFS; + + max_irqs = len / sizeof(struct kvm_s390_irq); + fi = &kvm->arch.float_int; spin_lock(&fi->lock); 
- list_for_each_entry(inti, &fi->list, list) { - if (len < sizeof(struct kvm_s390_irq)) { + if (n == max_irqs) { /* signal userspace to try again */ ret = -ENOMEM; break; } - ret = copy_irq_to_user(inti, buf); - if (ret) - break; - buf += sizeof(struct kvm_s390_irq); - len -= sizeof(struct kvm_s390_irq); + inti_to_irq(inti, &buf[n]); n++; } - spin_unlock(&fi->lock); - mutex_unlock(&kvm->lock); + if (!ret && n > 0) { + if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n)) + ret = -EFAULT; + } + vfree(buf); return ret < 0 ? ret : n; } @@ -1459,7 +1457,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) switch (attr->group) { case KVM_DEV_FLIC_GET_ALL_IRQS: - r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr, + r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr, attr->attr); break; default: diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index a8f3d9b71c11..2c1ea12ed55d 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -146,8 +146,8 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, u64 cr6, u64 schid); -void kvm_s390_reinject_io_int(struct kvm *kvm, - struct kvm_s390_interrupt_info *inti); +int kvm_s390_reinject_io_int(struct kvm *kvm, + struct kvm_s390_interrupt_info *inti); int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); /* implemented in intercept.c */ diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 1be578d64dfc..1edbf953cf0e 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -229,18 +229,19 @@ static int handle_tpi(struct kvm_vcpu *vcpu) struct kvm_s390_interrupt_info *inti; unsigned long len; u32 tpi_data[3]; - int cc, rc; + int rc; u64 addr; - rc = 0; addr = kvm_s390_get_base_disp_s(vcpu); if (addr & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - cc = 0; + inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); - if (!inti) - goto no_interrupt; - cc = 1; + if (!inti) { + kvm_s390_set_psw_cc(vcpu, 0); + return 0; + } + tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr; tpi_data[1] = inti->io.io_int_parm; tpi_data[2] = inti->io.io_int_word; @@ -251,30 +252,38 @@ static int handle_tpi(struct kvm_vcpu *vcpu) */ len = sizeof(tpi_data) - 4; rc = write_guest(vcpu, addr, &tpi_data, len); - if (rc) - return kvm_s390_inject_prog_cond(vcpu, rc); + if (rc) { + rc = kvm_s390_inject_prog_cond(vcpu, rc); + goto reinject_interrupt; + } } else { /* * Store the three-word I/O interruption code into * the appropriate lowcore area. */ len = sizeof(tpi_data); - if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) + if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) { + /* failed writes to the low core are not recoverable */ rc = -EFAULT; + goto reinject_interrupt; + } } + + /* irq was successfully handed to the guest */ + kfree(inti); + kvm_s390_set_psw_cc(vcpu, 1); + return 0; +reinject_interrupt: /* * If we encounter a problem storing the interruption code, the * instruction is suppressed from the guest's view: reinject the * interrupt. */ - if (!rc) + if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) { kfree(inti); - else - kvm_s390_reinject_io_int(vcpu->kvm, inti); -no_interrupt: - /* Set condition code and we're done. 
*/ - if (!rc) - kvm_s390_set_psw_cc(vcpu, cc); + rc = -EFAULT; + } + /* don't set the cc, a pgm irq was injected or we drop to user space */ return rc ? -EFAULT : 0; } @@ -462,6 +471,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) for (n = mem->count - 1; n > 0 ; n--) memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); + memset(&mem->vm[0], 0, sizeof(mem->vm[0])); mem->vm[0].cpus_total = cpus; mem->vm[0].cpus_configured = cpus; mem->vm[0].cpus_standby = 0; diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index 47f29b1d1846..e7814b74caf8 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -69,7 +69,7 @@ struct insn { const insn_byte_t *next_byte; }; -#define MAX_INSN_SIZE 16 +#define MAX_INSN_SIZE 15 #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6) #define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3) diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index a1410db38a1a..653dfa7662e1 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -30,6 +30,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx) :: "a" (eax), "c" (ecx)); } +static inline void __sti_mwait(unsigned long eax, unsigned long ecx) +{ + trace_hardirqs_on(); + /* "mwait %eax, %ecx;" */ + asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); +} + /* * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, * which can obviate IPI to trigger checking of need_resched. diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 073983398364..666bcf14ce10 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -557,6 +557,8 @@ struct event_constraint intel_core2_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ + INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01), EVENT_CONSTRAINT_END }; @@ -564,6 +566,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ + INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01), EVENT_CONSTRAINT_END }; @@ -587,6 +591,8 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ + INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f), EVENT_CONSTRAINT_END }; @@ -602,6 +608,8 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). 
*/ + INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f), EVENT_CONSTRAINT_END }; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index e127ddaa2d5a..6ad8a6396b75 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -398,6 +399,53 @@ static void amd_e400_idle(void) default_idle(); } +/* + * Intel Core2 and older machines prefer MWAIT over HALT for C1. + * We can't rely on cpuidle installing MWAIT, because it will not load + * on systems that support only C1 -- so the boot default must be MWAIT. + * + * Some AMD machines are the opposite, they depend on using HALT. + * + * So for default C1, which is used during boot until cpuidle loads, + * use MWAIT-C1 on Intel HW that has it, else use HALT. + */ +static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c) +{ + if (c->x86_vendor != X86_VENDOR_INTEL) + return 0; + + if (!cpu_has(c, X86_FEATURE_MWAIT)) + return 0; + + return 1; +} + +/* + * MONITOR/MWAIT with no hints, used for default C1 state. + * This invokes MWAIT with interrupts enabled and no flags, + * which is backwards compatible with the original MWAIT implementation. + */ + +static void mwait_idle(void) +{ + if (!current_set_polling_and_test()) { + if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) { + smp_mb(); /* quirk */ + clflush((void *)&current_thread_info()->flags); + smp_mb(); /* quirk */ + } + + __monitor((void *)&current_thread_info()->flags, 0, 0); + if (!need_resched()) + __sti_mwait(0, 0); + else + local_irq_enable(); + } else { + local_irq_enable(); + } + __current_clr_polling(); +} + void select_idle_routine(const struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP @@ -411,6 +459,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c) /* E400: APIC timer interrupt does not wake up CPU from C1e */ pr_info("using AMD E400 aware idle routine\n"); x86_idle = amd_e400_idle; + } else if (prefer_mwait_c1_over_halt(c)) { + pr_info("using mwait in idle threads\n"); + x86_idle = mwait_idle; } else x86_idle = default_idle; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 64d76c102230..5ec6a9969581 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5781,7 +5781,6 @@ int kvm_arch_init(void *opaque) kvm_set_mmio_spte_mask(); kvm_x86_ops = ops; - kvm_init_msr_list(); kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, PT_DIRTY_MASK, PT64_NX_MASK, 0); @@ -7218,7 +7217,14 @@ void kvm_arch_hardware_disable(void) int kvm_arch_hardware_setup(void) { - return kvm_x86_ops->hardware_setup(); + int r; + + r = kvm_x86_ops->hardware_setup(); + if (r != 0) + return r; + + kvm_init_msr_list(); + return 0; } void kvm_arch_hardware_unsetup(void) diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 1313ae6b478b..85994f5d48e4 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -52,6 +52,13 @@ */ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64) { + /* + * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid + * even if the input buffer is long enough to hold them. + */ + if (buf_len > MAX_INSN_SIZE) + buf_len = MAX_INSN_SIZE; + memset(insn, 0, sizeof(*insn)); insn->kaddr = kaddr; insn->end_kaddr = kaddr + buf_len; diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index e31d4949124a..87be10e8b57a 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -428,6 +428,36 @@ config DEFAULT_MEM_SIZE If unsure, leave the default value here. 
+config XTFPGA_LCD + bool "Enable XTFPGA LCD driver" + depends on XTENSA_PLATFORM_XTFPGA + default n + help + There's a 2x16 LCD on most of XTFPGA boards, kernel may output + progress messages there during bootup/shutdown. It may be useful + during board bringup. + + If unsure, say N. + +config XTFPGA_LCD_BASE_ADDR + hex "XTFPGA LCD base address" + depends on XTFPGA_LCD + default "0x0d0c0000" + help + Base address of the LCD controller inside KIO region. + Different boards from XTFPGA family have LCD controller at different + addresses. Please consult prototyping user guide for your board for + the correct address. Wrong address here may lead to hardware lockup. + +config XTFPGA_LCD_8BIT_ACCESS + bool "Use 8-bit access to XTFPGA LCD" + depends on XTFPGA_LCD + default n + help + LCD may be connected with 4- or 8-bit interface, 8-bit access may + only be used with 8-bit interface. Please consult prototyping user + guide for your board for the correct interface width. + endmenu menu "Executable file formats" diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h index db5bb72e2f4e..62d84657c60b 100644 --- a/arch/xtensa/include/uapi/asm/unistd.h +++ b/arch/xtensa/include/uapi/asm/unistd.h @@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6) __SYSCALL(324, sys_name_to_handle_at, 5) #define __NR_open_by_handle_at 325 __SYSCALL(325, sys_open_by_handle_at, 3) -#define __NR_sync_file_range 326 +#define __NR_sync_file_range2 326 __SYSCALL(326, sys_sync_file_range2, 6) #define __NR_perf_event_open 327 __SYSCALL(327, sys_perf_event_open, 5) diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index d05f8feeb8d7..17b1ef3232e4 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c @@ -349,8 +349,8 @@ static void iss_net_timer(unsigned long priv) { struct iss_net_private *lp = (struct iss_net_private *)priv; - spin_lock(&lp->lock); iss_net_poll(); + spin_lock(&lp->lock); mod_timer(&lp->timer, jiffies + lp->timer_val); spin_unlock(&lp->lock); } @@ -361,7 +361,7 @@ static int iss_net_open(struct net_device *dev) struct iss_net_private *lp = netdev_priv(dev); int err; - spin_lock(&lp->lock); + spin_lock_bh(&lp->lock); err = lp->tp.open(lp); if (err < 0) @@ -376,9 +376,11 @@ static int iss_net_open(struct net_device *dev) while ((err = iss_net_rx(dev)) > 0) ; - spin_lock(&opened_lock); + spin_unlock_bh(&lp->lock); + spin_lock_bh(&opened_lock); list_add(&lp->opened_list, &opened); - spin_unlock(&opened_lock); + spin_unlock_bh(&opened_lock); + spin_lock_bh(&lp->lock); init_timer(&lp->timer); lp->timer_val = ISS_NET_TIMER_VALUE; @@ -387,7 +389,7 @@ static int iss_net_open(struct net_device *dev) mod_timer(&lp->timer, jiffies + lp->timer_val); out: - spin_unlock(&lp->lock); + spin_unlock_bh(&lp->lock); return err; } @@ -395,7 +397,7 @@ static int iss_net_close(struct net_device *dev) { struct iss_net_private *lp = netdev_priv(dev); netif_stop_queue(dev); - spin_lock(&lp->lock); + spin_lock_bh(&lp->lock); spin_lock(&opened_lock); list_del(&opened); @@ -405,18 +407,17 @@ static int iss_net_close(struct net_device *dev) lp->tp.close(lp); - spin_unlock(&lp->lock); + spin_unlock_bh(&lp->lock); return 0; } static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct iss_net_private *lp = netdev_priv(dev); - unsigned long flags; int len; netif_stop_queue(dev); - spin_lock_irqsave(&lp->lock, flags); + spin_lock_bh(&lp->lock); len = lp->tp.write(lp, &skb); @@ -438,7 +439,7 @@ static int 
iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev) pr_err("%s: %s failed(%d)\n", dev->name, __func__, len); } - spin_unlock_irqrestore(&lp->lock, flags); + spin_unlock_bh(&lp->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; @@ -466,9 +467,9 @@ static int iss_net_set_mac(struct net_device *dev, void *addr) if (!is_valid_ether_addr(hwaddr->sa_data)) return -EADDRNOTAVAIL; - spin_lock(&lp->lock); + spin_lock_bh(&lp->lock); memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN); - spin_unlock(&lp->lock); + spin_unlock_bh(&lp->lock); return 0; } @@ -520,11 +521,11 @@ static int iss_net_configure(int index, char *init) *lp = (struct iss_net_private) { .device_list = LIST_HEAD_INIT(lp->device_list), .opened_list = LIST_HEAD_INIT(lp->opened_list), - .lock = __SPIN_LOCK_UNLOCKED(lp.lock), .dev = dev, .index = index, - }; + }; + spin_lock_init(&lp->lock); /* * If this name ends up conflicting with an existing registered * netdevice, that is OK, register_netdev{,ice}() will notice this diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile index b9ae206340cd..7839d38b2337 100644 --- a/arch/xtensa/platforms/xtfpga/Makefile +++ b/arch/xtensa/platforms/xtfpga/Makefile @@ -6,4 +6,5 @@ # # Note 2! The CFLAGS definitions are in the main makefile... -obj-y = setup.o lcd.o +obj-y += setup.o +obj-$(CONFIG_XTFPGA_LCD) += lcd.o diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h index 6edd20bb4565..4e0af2662a21 100644 --- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h +++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h @@ -40,9 +40,6 @@ /* UART */ #define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020) -/* LCD instruction and data addresses. */ -#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000)) -#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004)) /* Misc. */ #define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000) diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h index 0e435645af5a..4c8541ed1139 100644 --- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h +++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h @@ -11,10 +11,25 @@ #ifndef __XTENSA_XTAVNET_LCD_H #define __XTENSA_XTAVNET_LCD_H +#ifdef CONFIG_XTFPGA_LCD /* Display string STR at position POS on the LCD. */ void lcd_disp_at_pos(char *str, unsigned char pos); /* Shift the contents of the LCD display left or right. */ void lcd_shiftleft(void); void lcd_shiftright(void); +#else +static inline void lcd_disp_at_pos(char *str, unsigned char pos) +{ +} + +static inline void lcd_shiftleft(void) +{ +} + +static inline void lcd_shiftright(void) +{ +} +#endif + #endif diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c index 2872301598df..4dc0c1b43f4b 100644 --- a/arch/xtensa/platforms/xtfpga/lcd.c +++ b/arch/xtensa/platforms/xtfpga/lcd.c @@ -1,50 +1,63 @@ /* - * Driver for the LCD display on the Tensilica LX60 Board. + * Driver for the LCD display on the Tensilica XTFPGA board family. + * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001, 2006 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. */ -/* - * - * FIXME: this code is from the examples from the LX60 user guide. 
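For context, the platform/lcd.h hunk above relies on a standard kernel idiom: when the Kconfig symbol is disabled, the public entry points are replaced by empty static inlines so callers compile unchanged and the optimizer drops the calls. A minimal sketch of the same shape, with an invented CONFIG_FOO_PANEL symbol and foo_panel_*() names (illustration only, not part of the patch):

/*
 * Hypothetical header following the CONFIG_XTFPGA_LCD pattern above:
 * real prototypes when the driver is built, no-op stubs otherwise.
 */
#ifdef CONFIG_FOO_PANEL
void foo_panel_print(const char *str);
void foo_panel_clear(void);
#else
static inline void foo_panel_print(const char *str)
{
}

static inline void foo_panel_clear(void)
{
}
#endif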
- * - * The lcd_pause function does busy waiting, which is probably not - * great. Maybe the code could be changed to use kernel timers, or - * change the hardware to not need to wait. - */ - +#include #include #include #include #include -#include -#define LCD_PAUSE_ITERATIONS 4000 +/* LCD instruction and data addresses. */ +#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR)) +#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4) + #define LCD_CLEAR 0x1 #define LCD_DISPLAY_ON 0xc /* 8bit and 2 lines display */ #define LCD_DISPLAY_MODE8BIT 0x38 +#define LCD_DISPLAY_MODE4BIT 0x28 #define LCD_DISPLAY_POS 0x80 #define LCD_SHIFT_LEFT 0x18 #define LCD_SHIFT_RIGHT 0x1c +static void lcd_put_byte(u8 *addr, u8 data) +{ +#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS + ACCESS_ONCE(*addr) = data; +#else + ACCESS_ONCE(*addr) = data & 0xf0; + ACCESS_ONCE(*addr) = (data << 4) & 0xf0; +#endif +} + static int __init lcd_init(void) { - *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT; + ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; mdelay(5); - *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT; + ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; udelay(200); - *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT; + ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; + udelay(50); +#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS + ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT; + udelay(50); + lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT); udelay(50); - *LCD_INSTR_ADDR = LCD_DISPLAY_ON; +#endif + lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON); udelay(50); - *LCD_INSTR_ADDR = LCD_CLEAR; + lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR); mdelay(10); lcd_disp_at_pos("XTENSA LINUX", 0); return 0; @@ -52,10 +65,10 @@ static int __init lcd_init(void) void lcd_disp_at_pos(char *str, unsigned char pos) { - *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos; + lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos); udelay(100); while (*str != 0) { - *LCD_DATA_ADDR = *str; + lcd_put_byte(LCD_DATA_ADDR, *str); udelay(200); str++; } @@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos) void lcd_shiftleft(void) { - *LCD_INSTR_ADDR = LCD_SHIFT_LEFT; + lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT); udelay(50); } void lcd_shiftright(void) { - *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT; + lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT); udelay(50); } diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 755b90c40ddf..c0b39f304ea3 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -346,7 +346,6 @@ acpi_tb_install_standard_table(acpi_physical_address address, */ acpi_tb_uninstall_table(&new_table_desc); *table_index = i; - (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); return_ACPI_STATUS(AE_OK); } } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index dc4d8960684a..6b69f7bf2500 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -298,7 +298,11 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent) struct acpi_device_physical_node *pn; bool offline = true; - mutex_lock(&adev->physical_node_lock); + /* + * acpi_container_offline() calls this for all of the container's + * children under the container's physical_node_lock lock. 
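The rewritten lcd.c above centres on lcd_put_byte(): with a 4-bit wired HD44780-class controller, each byte goes out as two writes of the upper data lines, high nibble first, while 8-bit wiring takes the byte in one store. A stand-alone sketch of that access pattern, assuming an ioremapped register rather than the fixed KIO address the platform code uses (names are illustrative):

#include <linux/io.h>
#include <linux/types.h>

/* Write one byte to a 4-bit wired LCD port, high nibble first,
 * mirroring what lcd_put_byte() does when
 * CONFIG_XTFPGA_LCD_8BIT_ACCESS is not set. */
static void lcd4_write_byte(void __iomem *port, u8 data)
{
	writeb(data & 0xf0, port);		/* bits 7..4 on D4..D7 */
	writeb((data << 4) & 0xf0, port);	/* then bits 3..0 */
}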
+ */ + mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING); list_for_each_entry(pn, &adev->physical_node_list, node) if (device_supports_offline(pn->dev) && !pn->dev->offline) { diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 876bae5ade33..79bc203f51ef 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -515,11 +515,11 @@ int bus_add_device(struct device *dev) goto out_put; error = device_add_groups(dev, bus->dev_groups); if (error) - goto out_groups; + goto out_id; error = sysfs_create_link(&bus->p->devices_kset->kobj, &dev->kobj, dev_name(dev)); if (error) - goto out_id; + goto out_groups; error = sysfs_create_link(&dev->kobj, &dev->bus->p->subsys.kobj, "subsystem"); if (error) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 6e64563361f0..9c2ba1c97c42 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -62,15 +62,21 @@ static int cache_setup_of_node(unsigned int cpu) return -ENOENT; } - while (np && index < cache_leaves(cpu)) { + while (index < cache_leaves(cpu)) { this_leaf = this_cpu_ci->info_list + index; if (this_leaf->level != 1) np = of_find_next_cache_node(np); else np = of_node_get(np);/* cpu node itself */ + if (!np) + break; this_leaf->of_node = np; index++; } + + if (index != cache_leaves(cpu)) /* not all OF nodes populated */ + return -ENOENT; + return 0; } @@ -189,8 +195,11 @@ static int detect_cache_attributes(unsigned int cpu) * will be set up here only if they are not populated already */ ret = cache_shared_cpu_map_setup(cpu); - if (ret) + if (ret) { + pr_warn("Unable to detect cache hierarcy from DT for CPU %d\n", + cpu); goto free_ci; + } return 0; free_ci: diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 9421fed40905..e68ab79df28b 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -101,6 +101,15 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) } r = platform_get_resource(dev, IORESOURCE_IRQ, num); + /* + * The resources may pass trigger flags to the irqs that need + * to be set up. It so happens that the trigger flags for + * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER* + * settings. + */ + if (r && r->flags & IORESOURCE_BITS) + irqd_set_trigger_type(irq_get_irq_data(r->start), + r->flags & IORESOURCE_BITS); return r ? 
r->start : -ENXIO; #endif diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index de4c8499cbac..288547a3c566 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -65,6 +65,7 @@ static const struct usb_device_id ath3k_table[] = { /* Atheros AR3011 with sflash firmware*/ { USB_DEVICE(0x0489, 0xE027) }, { USB_DEVICE(0x0489, 0xE03D) }, + { USB_DEVICE(0x04F2, 0xAFF1) }, { USB_DEVICE(0x0930, 0x0215) }, { USB_DEVICE(0x0CF3, 0x3002) }, { USB_DEVICE(0x0CF3, 0xE019) }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index c91ec52a8948..b0449bb18873 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -153,6 +153,7 @@ static const struct usb_device_id blacklist_table[] = { /* Atheros 3011 with sflash firmware */ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE }, diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c index a23ac0c724f0..0b7c3e8840ba 100644 --- a/drivers/clk/at91/clk-usb.c +++ b/drivers/clk/at91/clk-usb.c @@ -56,22 +56,55 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw, return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1)); } -static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) +static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, + unsigned long *best_parent_rate, + struct clk_hw **best_parent_hw) { - unsigned long div; + struct clk *parent = NULL; + long best_rate = -EINVAL; + unsigned long tmp_rate; + int best_diff = -1; + int tmp_diff; + int i; - if (!rate) - return -EINVAL; + for (i = 0; i < __clk_get_num_parents(hw->clk); i++) { + int div; - if (rate >= *parent_rate) - return *parent_rate; + parent = clk_get_parent_by_index(hw->clk, i); + if (!parent) + continue; + + for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) { + unsigned long tmp_parent_rate; + + tmp_parent_rate = rate * div; + tmp_parent_rate = __clk_round_rate(parent, + tmp_parent_rate); + tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div); + if (tmp_rate < rate) + tmp_diff = rate - tmp_rate; + else + tmp_diff = tmp_rate - rate; + + if (best_diff < 0 || best_diff > tmp_diff) { + best_rate = tmp_rate; + best_diff = tmp_diff; + *best_parent_rate = tmp_parent_rate; + *best_parent_hw = __clk_get_hw(parent); + } + + if (!best_diff || tmp_rate < rate) + break; + } - div = DIV_ROUND_CLOSEST(*parent_rate, rate); - if (div > SAM9X5_USB_MAX_DIV + 1) - div = SAM9X5_USB_MAX_DIV + 1; + if (!best_diff) + break; + } - return DIV_ROUND_CLOSEST(*parent_rate, div); + return best_rate; } static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index) @@ -121,7 +154,7 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate, static const struct clk_ops at91sam9x5_usb_ops = { .recalc_rate = at91sam9x5_clk_usb_recalc_rate, - .round_rate = at91sam9x5_clk_usb_round_rate, + .determine_rate = at91sam9x5_clk_usb_determine_rate, .get_parent = at91sam9x5_clk_usb_get_parent, .set_parent = at91sam9x5_clk_usb_set_parent, .set_rate = at91sam9x5_clk_usb_set_rate, @@ -159,7 +192,7 @@ static const struct clk_ops at91sam9n12_usb_ops = { .disable = 
at91sam9n12_clk_usb_disable, .is_enabled = at91sam9n12_clk_usb_is_enabled, .recalc_rate = at91sam9x5_clk_usb_recalc_rate, - .round_rate = at91sam9x5_clk_usb_round_rate, + .determine_rate = at91sam9x5_clk_usb_determine_rate, .set_rate = at91sam9x5_clk_usb_set_rate, }; @@ -179,7 +212,8 @@ at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name, init.ops = &at91sam9x5_usb_ops; init.parent_names = parent_names; init.num_parents = num_parents; - init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE; + init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE | + CLK_SET_RATE_PARENT; usb->hw.init = &init; usb->pmc = pmc; @@ -207,7 +241,7 @@ at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name, init.ops = &at91sam9n12_usb_ops; init.parent_names = &parent_name; init.num_parents = 1; - init.flags = CLK_SET_RATE_GATE; + init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT; usb->hw.init = &init; usb->pmc = pmc; diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 08b8b3729f53..4fe9c01a14b4 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -242,7 +242,7 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK; cfg = f->pre_div << CFG_SRC_DIV_SHIFT; cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT; - if (rcg->mnd_width && f->n) + if (rcg->mnd_width && f->n && (f->m != f->n)) cfg |= CFG_MODE_DUAL_EDGE; ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, mask, cfg); diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c index afed5eb0691e..2d12fee408a2 100644 --- a/drivers/clk/qcom/gcc-ipq806x.c +++ b/drivers/clk/qcom/gcc-ipq806x.c @@ -514,8 +514,8 @@ static struct freq_tbl clk_tbl_gsbi_qup[] = { { 10800000, P_PXO, 1, 2, 5 }, { 15060000, P_PLL8, 1, 2, 51 }, { 24000000, P_PLL8, 4, 1, 4 }, + { 25000000, P_PXO, 1, 0, 0 }, { 25600000, P_PLL8, 1, 1, 15 }, - { 27000000, P_PXO, 1, 0, 0 }, { 48000000, P_PLL8, 4, 1, 2 }, { 51200000, P_PLL8, 1, 2, 15 }, { } diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 88e8c6bbd77f..4bc7ccd7e494 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -1354,7 +1354,7 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = { VPLL_LOCK, VPLL_CON0, NULL), }; -static void __init exynos4_core_down_clock(enum exynos4_soc soc) +static void __init exynos4x12_core_down_clock(void) { unsigned int tmp; @@ -1373,11 +1373,9 @@ static void __init exynos4_core_down_clock(enum exynos4_soc soc) __raw_writel(tmp, reg_base + PWR_CTRL1); /* - * Disable the clock up feature on Exynos4x12, in case it was - * enabled by bootloader. + * Disable the clock up feature in case it was enabled by bootloader. 
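Stepping back to the at91 clk-usb hunk above: moving from .round_rate to .determine_rate lets the callback pick both a parent and a divider, and the implementation is a brute-force search that keeps whichever combination lands closest to the requested rate. A simplified sketch of the inner loop for a single parent (the real callback also walks every parent and re-rounds the parent rate; this is illustration only):

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST() */

/* Pick the divider whose output is nearest to the requested rate,
 * stopping early on an exact match. */
static unsigned long pick_best_div(unsigned long parent_rate,
				   unsigned long rate, int max_div)
{
	unsigned long best_rate = 0, best_diff = ~0UL;
	int div;

	for (div = 1; div <= max_div; div++) {
		unsigned long out = DIV_ROUND_CLOSEST(parent_rate, div);
		unsigned long diff = out > rate ? out - rate : rate - out;

		if (diff < best_diff) {
			best_diff = diff;
			best_rate = out;
		}
		if (!diff)
			break;
	}
	return best_rate;
}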
*/ - if (exynos4_soc == EXYNOS4X12) - __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2); + __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2); } /* register exynos4 clocks */ @@ -1474,7 +1472,8 @@ static void __init exynos4_clk_init(struct device_node *np, samsung_clk_register_alias(ctx, exynos4_aliases, ARRAY_SIZE(exynos4_aliases)); - exynos4_core_down_clock(soc); + if (soc == EXYNOS4X12) + exynos4x12_core_down_clock(); exynos4_clk_sleep_init(); samsung_clk_of_add_provider(np, ctx); diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c index 97dc8595c3cd..c51f7c84c163 100644 --- a/drivers/clk/tegra/clk.c +++ b/drivers/clk/tegra/clk.c @@ -272,7 +272,7 @@ void __init tegra_add_of_provider(struct device_node *np) of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); rst_ctlr.of_node = np; - rst_ctlr.nr_resets = clk_num * 32; + rst_ctlr.nr_resets = periph_banks * 32; reset_controller_register(&rst_ctlr); } diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index f79dd410dede..af3b77b14835 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -554,15 +554,23 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) return err; } -static int omap_aes_check_aligned(struct scatterlist *sg) +static int omap_aes_check_aligned(struct scatterlist *sg, int total) { + int len = 0; + while (sg) { if (!IS_ALIGNED(sg->offset, 4)) return -1; if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE)) return -1; + + len += sg->length; sg = sg_next(sg); } + + if (len != total) + return -1; + return 0; } @@ -633,8 +641,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, dd->in_sg = req->src; dd->out_sg = req->dst; - if (omap_aes_check_aligned(dd->in_sg) || - omap_aes_check_aligned(dd->out_sg)) { + if (omap_aes_check_aligned(dd->in_sg, dd->total) || + omap_aes_check_aligned(dd->out_sg, dd->total)) { if (omap_aes_copy_sgs(dd)) pr_err("Failed to copy SGs for unaligned cases\n"); dd->sgs_copied = 1; diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 7bc3e9b288f3..1be85afe4a00 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -313,11 +313,13 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct mvebu_gpio_chip *mvchip = gc->private; + struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = 1 << (d->irq - gc->irq_base); irq_gc_lock(gc); - gc->mask_cache &= ~mask; - writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip)); + ct->mask_cache_priv &= ~mask; + + writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip)); irq_gc_unlock(gc); } @@ -325,11 +327,13 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct mvebu_gpio_chip *mvchip = gc->private; + struct irq_chip_type *ct = irq_data_get_chip_type(d); + u32 mask = 1 << (d->irq - gc->irq_base); irq_gc_lock(gc); - gc->mask_cache |= mask; - writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip)); + ct->mask_cache_priv |= mask; + writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip)); irq_gc_unlock(gc); } @@ -337,11 +341,13 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct mvebu_gpio_chip *mvchip = gc->private; + struct irq_chip_type *ct = irq_data_get_chip_type(d); + u32 mask = 1 << (d->irq - gc->irq_base); irq_gc_lock(gc); - gc->mask_cache &= ~mask; - writel_relaxed(gc->mask_cache, 
mvebu_gpioreg_level_mask(mvchip)); + ct->mask_cache_priv &= ~mask; + writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip)); irq_gc_unlock(gc); } @@ -349,11 +355,13 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct mvebu_gpio_chip *mvchip = gc->private; + struct irq_chip_type *ct = irq_data_get_chip_type(d); + u32 mask = 1 << (d->irq - gc->irq_base); irq_gc_lock(gc); - gc->mask_cache |= mask; - writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip)); + ct->mask_cache_priv |= mask; + writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip)); irq_gc_unlock(gc); } diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c index faf1c0c5ab2e..e5b19c8a0265 100644 --- a/drivers/gpu/drm/i2c/adv7511.c +++ b/drivers/gpu/drm/i2c/adv7511.c @@ -33,6 +33,7 @@ struct adv7511 { unsigned int current_edid_segment; uint8_t edid_buf[256]; + bool edid_read; wait_queue_head_t wq; struct drm_encoder *encoder; @@ -379,69 +380,71 @@ static bool adv7511_hpd(struct adv7511 *adv7511) return false; } -static irqreturn_t adv7511_irq_handler(int irq, void *devid) -{ - struct adv7511 *adv7511 = devid; - - if (adv7511_hpd(adv7511)) - drm_helper_hpd_irq_event(adv7511->encoder->dev); - - wake_up_all(&adv7511->wq); - - return IRQ_HANDLED; -} - -static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511, - unsigned int irq) +static int adv7511_irq_process(struct adv7511 *adv7511) { unsigned int irq0, irq1; - unsigned int pending; int ret; ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0); if (ret < 0) - return 0; + return ret; + ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1); if (ret < 0) - return 0; + return ret; + + regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0); + regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1); + + if (irq0 & ADV7511_INT0_HDP) + drm_helper_hpd_irq_event(adv7511->encoder->dev); + + if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) { + adv7511->edid_read = true; + + if (adv7511->i2c_main->irq) + wake_up_all(&adv7511->wq); + } + + return 0; +} - pending = (irq1 << 8) | irq0; +static irqreturn_t adv7511_irq_handler(int irq, void *devid) +{ + struct adv7511 *adv7511 = devid; + int ret; - return pending & irq; + ret = adv7511_irq_process(adv7511); + return ret < 0 ? IRQ_NONE : IRQ_HANDLED; } -static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq, - int timeout) +/* ----------------------------------------------------------------------------- + * EDID retrieval + */ + +static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout) { - unsigned int pending; int ret; if (adv7511->i2c_main->irq) { ret = wait_event_interruptible_timeout(adv7511->wq, - adv7511_is_interrupt_pending(adv7511, irq), - msecs_to_jiffies(timeout)); - if (ret <= 0) - return 0; - pending = adv7511_is_interrupt_pending(adv7511, irq); + adv7511->edid_read, msecs_to_jiffies(timeout)); } else { - if (timeout < 25) - timeout = 25; - do { - pending = adv7511_is_interrupt_pending(adv7511, irq); - if (pending) + for (; timeout > 0; timeout -= 25) { + ret = adv7511_irq_process(adv7511); + if (ret < 0) break; + + if (adv7511->edid_read) + break; + msleep(25); - timeout -= 25; - } while (timeout >= 25); + } } - return pending; + return adv7511->edid_read ? 
0 : -EIO; } -/* ----------------------------------------------------------------------------- - * EDID retrieval - */ - static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) { @@ -463,19 +466,14 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block, return ret; if (status != 2) { + adv7511->edid_read = false; regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT, block); - ret = adv7511_wait_for_interrupt(adv7511, - ADV7511_INT0_EDID_READY | - ADV7511_INT1_DDC_ERROR, 200); - - if (!(ret & ADV7511_INT0_EDID_READY)) - return -EIO; + ret = adv7511_wait_for_edid(adv7511, 200); + if (ret < 0) + return ret; } - regmap_write(adv7511->regmap, ADV7511_REG_INT(0), - ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR); - /* Break this apart, hopefully more I2C controllers will * support 64 byte transfers than 256 byte transfers */ @@ -528,7 +526,9 @@ static int adv7511_get_modes(struct drm_encoder *encoder, /* Reading the EDID only works if the device is powered */ if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) { regmap_write(adv7511->regmap, ADV7511_REG_INT(0), - ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR); + ADV7511_INT0_EDID_READY); + regmap_write(adv7511->regmap, ADV7511_REG_INT(1), + ADV7511_INT1_DDC_ERROR); regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, ADV7511_POWER_POWER_DOWN, 0); adv7511->current_edid_segment = -1; @@ -563,7 +563,9 @@ static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode) adv7511->current_edid_segment = -1; regmap_write(adv7511->regmap, ADV7511_REG_INT(0), - ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR); + ADV7511_INT0_EDID_READY); + regmap_write(adv7511->regmap, ADV7511_REG_INT(1), + ADV7511_INT1_DDC_ERROR); regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, ADV7511_POWER_POWER_DOWN, 0); /* diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 391d47e1f131..34d0e58f6421 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1040,7 +1040,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4); s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); - s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); + s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); s->ecochk = I915_READ(GAM_ECOCHK); @@ -1122,7 +1122,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]); I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); - I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count); + I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); I915_WRITE(GAM_ECOCHK, s->ecochk); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 1464bc1f8943..247cc18a6f9b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3717,14 +3717,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev) ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); I915_WRITE16(IMR, dev_priv->irq_mask); I915_WRITE16(IER, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - 
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | I915_USER_INTERRUPT); POSTING_READ16(IER); @@ -3886,14 +3884,12 @@ static int i915_irq_postinstall(struct drm_device *dev) I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); enable_mask = I915_ASLE_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | I915_USER_INTERRUPT; if (I915_HAS_HOTPLUG(dev)) { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 172de3b3433b..6e7a6f03fca3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1713,6 +1713,7 @@ enum punit_power_well { #define GMBUS_CYCLE_INDEX (2<<25) #define GMBUS_CYCLE_STOP (4<<25) #define GMBUS_BYTE_COUNT_SHIFT 16 +#define GMBUS_BYTE_COUNT_MAX 256U #define GMBUS_SLAVE_INDEX_SHIFT 8 #define GMBUS_SLAVE_ADDR_SHIFT 1 #define GMBUS_SLAVE_READ (1<<0) diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index b31088a551f2..56e437e31580 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -270,18 +270,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) } static int -gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, - u32 gmbus1_index) +gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, + unsigned short addr, u8 *buf, unsigned int len, + u32 gmbus1_index) { int reg_offset = dev_priv->gpio_mmio_base; - u16 len = msg->len; - u8 *buf = msg->buf; I915_WRITE(GMBUS1 + reg_offset, gmbus1_index | GMBUS_CYCLE_WAIT | (len << GMBUS_BYTE_COUNT_SHIFT) | - (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | + (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); while (len) { int ret; @@ -303,11 +302,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, } static int -gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) +gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, + u32 gmbus1_index) { - int reg_offset = dev_priv->gpio_mmio_base; - u16 len = msg->len; u8 *buf = msg->buf; + unsigned int rx_size = msg->len; + unsigned int len; + int ret; + + do { + len = min(rx_size, GMBUS_BYTE_COUNT_MAX); + + ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, + buf, len, gmbus1_index); + if (ret) + return ret; + + rx_size -= len; + buf += len; + } while (rx_size != 0); + + return 0; +} + +static int +gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, + unsigned short addr, u8 *buf, unsigned int len) +{ + int reg_offset = dev_priv->gpio_mmio_base; + unsigned int chunk_size = len; u32 val, loop; val = loop = 0; @@ -319,8 +342,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) I915_WRITE(GMBUS3 + reg_offset, val); I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT | - (msg->len << GMBUS_BYTE_COUNT_SHIFT) | - (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | + (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | + (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); while (len) { int ret; @@ -337,6 +360,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) if (ret) return ret; } + + return 0; +} + +static int +gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) +{ + u8 *buf = msg->buf; + unsigned int tx_size = msg->len; + unsigned int len; + 
int ret; + + do { + len = min(tx_size, GMBUS_BYTE_COUNT_MAX); + + ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len); + if (ret) + return ret; + + buf += len; + tx_size -= len; + } while (tx_size != 0); + return 0; } diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 86807ee91bd1..9bd56116fd5a 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, misc |= ATOM_COMPOSITESYNC; if (mode->flags & DRM_MODE_FLAG_INTERLACE) misc |= ATOM_INTERLACE; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + if (mode->flags & DRM_MODE_FLAG_DBLCLK) misc |= ATOM_DOUBLE_CLOCK_MODE; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2; args.susModeMiscInfo.usAccess = cpu_to_le16(misc); args.ucCRTC = radeon_crtc->crtc_id; @@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, misc |= ATOM_COMPOSITESYNC; if (mode->flags & DRM_MODE_FLAG_INTERLACE) misc |= ATOM_INTERLACE; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + if (mode->flags & DRM_MODE_FLAG_DBLCLK) misc |= ATOM_DOUBLE_CLOCK_MODE; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2; args.susModeMiscInfo.usAccess = cpu_to_le16(misc); args.ucCRTC = radeon_crtc->crtc_id; diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 433f72a1c006..995e2a0cf096 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -135,7 +135,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, GFP_KERNEL); if (!open_info) { err = -ENOMEM; - goto error0; + goto error_gpadl; } init_completion(&open_info->waitevent); @@ -151,7 +151,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, if (userdatalen > MAX_USER_DEFINED_BYTES) { err = -EINVAL; - goto error0; + goto error_gpadl; } if (userdatalen) @@ -195,6 +195,9 @@ error1: list_del(&open_info->msglistentry); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); +error_gpadl: + vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle); + error0: free_pages((unsigned long)out, get_order(send_ringbuffer_size + recv_ringbuffer_size)); diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 92462843db66..648cea462269 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -800,7 +800,7 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap, clk_disable(i2c->clk); spin_unlock_irqrestore(&i2c->lock, flags); - return ret; + return ret < 0 ? 
ret : num; } static u32 rk3x_i2c_func(struct i2c_adapter *adap) diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 63663332391d..c421e1159037 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -596,6 +596,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) adap->bus_recovery_info->set_scl(adap, 1); return i2c_generic_recovery(adap); } +EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery); int i2c_generic_gpio_recovery(struct i2c_adapter *adap) { @@ -610,6 +611,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap) return ret; } +EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery); int i2c_recover_bus(struct i2c_adapter *adap) { @@ -619,6 +621,7 @@ int i2c_recover_bus(struct i2c_adapter *adap) dev_dbg(&adap->dev, "Trying i2c bus recovery\n"); return adap->bus_recovery_info->recover_bus(adap); } +EXPORT_SYMBOL_GPL(i2c_recover_bus); static int i2c_device_probe(struct device *dev) { diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index 593f7ca9adc7..06cc1ff088f1 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c @@ -32,8 +32,9 @@ struct i2c_mux_priv { struct i2c_algorithm algo; struct i2c_adapter *parent; - void *mux_priv; /* the mux chip/device */ - u32 chan_id; /* the channel id */ + struct device *mux_dev; + void *mux_priv; + u32 chan_id; int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id); int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id); @@ -119,6 +120,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, /* Set up private adapter data */ priv->parent = parent; + priv->mux_dev = mux_dev; priv->mux_priv = mux_priv; priv->chan_id = chan_id; priv->select = select; @@ -203,7 +205,7 @@ void i2c_del_mux_adapter(struct i2c_adapter *adap) char symlink_name[20]; snprintf(symlink_name, sizeof(symlink_name), "channel-%u", priv->chan_id); - sysfs_remove_link(&adap->dev.parent->kobj, symlink_name); + sysfs_remove_link(&priv->mux_dev->kobj, symlink_name); sysfs_remove_link(&priv->adap.dev.kobj, "mux_device"); i2c_del_adapter(adap); diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 8c014b5dab4c..38acb3cfc545 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -99,12 +99,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, if (dmasync) dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); + if (!size) + return ERR_PTR(-EINVAL); + /* * If the combination of the addr and size requested for this memory * region causes an integer overflow, return error. 
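One note on the ib_umem_get() check rewritten just below: the intent is to reject any (addr, size) pair whose end address wraps around, either in the raw sum or after rounding the end up to a page boundary. The same guard in isolation (sketch, not the patch itself):

#include <linux/types.h>
#include <linux/mm.h>	/* PAGE_ALIGN() */

/* True when addr + size overflows, or when page-aligning the end
 * address wraps past the top of the address space. */
static bool range_overflows(unsigned long addr, size_t size)
{
	return (addr + size) < addr ||
	       PAGE_ALIGN(addr + size) < (addr + size);
}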
*/ - if ((PAGE_ALIGN(addr + size) <= size) || - (PAGE_ALIGN(addr + size) <= addr)) + if (((addr + size) < addr) || + PAGE_ALIGN(addr + size) < (addr + size)) return ERR_PTR(-EINVAL); if (!can_do_mlock()) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index c880329b4d64..51722be3e5e9 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -2564,8 +2564,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); - *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 | - wr->wr.ud.hlen); + *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen); *lso_seg_len = halign; return 0; } diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 20e859a6f1a6..76eb57b31a59 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -409,8 +409,8 @@ int iser_send_command(struct iscsi_conn *conn, if (scsi_prot_sg_count(sc)) { prot_buf->buf = scsi_prot_sglist(sc); prot_buf->size = scsi_prot_sg_count(sc); - prot_buf->data_len = data_buf->data_len >> - ilog2(sc->device->sector_size) * 8; + prot_buf->data_len = (data_buf->data_len >> + ilog2(sc->device->sector_size)) * 8; } if (hdr->flags & ISCSI_FLAG_CMD_READ) { diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index dafb3c531f96..0b57ba2c22cb 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -222,7 +222,7 @@ fail: static void isert_free_rx_descriptors(struct isert_conn *isert_conn) { - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct ib_device *ib_dev = isert_conn->conn_device->ib_device; struct iser_rx_desc *rx_desc; int i; @@ -719,8 +719,8 @@ out: static void isert_connect_release(struct isert_conn *isert_conn) { - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct isert_device *device = isert_conn->conn_device; + struct ib_device *ib_dev = device->ib_device; isert_dbg("conn %p\n", isert_conn); @@ -728,7 +728,8 @@ isert_connect_release(struct isert_conn *isert_conn) isert_conn_free_fastreg_pool(isert_conn); isert_free_rx_descriptors(isert_conn); - rdma_destroy_id(isert_conn->conn_cm_id); + if (isert_conn->conn_cm_id) + rdma_destroy_id(isert_conn->conn_cm_id); if (isert_conn->conn_qp) { struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context; @@ -878,12 +879,15 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, return 0; } -static void +static int isert_connect_error(struct rdma_cm_id *cma_id) { struct isert_conn *isert_conn = cma_id->qp->qp_context; + isert_conn->conn_cm_id = NULL; isert_put_conn(isert_conn); + + return -1; } static int @@ -912,7 +916,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ case RDMA_CM_EVENT_CONNECT_ERROR: - isert_connect_error(cma_id); + ret = isert_connect_error(cma_id); break; default: isert_err("Unhandled RDMA CMA event: %d\n", event->event); @@ -1848,11 +1852,13 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; spin_unlock_bh(&cmd->istate_lock); - if (ret) + if (ret) { + target_put_sess_cmd(se_cmd->se_sess, se_cmd); transport_send_check_condition_and_sense(se_cmd, se_cmd->pi_err, 0); - else + } else { target_execute_cmd(se_cmd); + } } static 
void diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 6e22682c8255..991dc6b20a58 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -893,6 +893,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse) } /* + * This writes the reg_07 value again to the hardware at the end of every + * set_rate call because the register loses its value. reg_07 allows setting + * absolute mode on v4 hardware + */ +static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse, + unsigned int rate) +{ + struct elantech_data *etd = psmouse->private; + + etd->original_set_rate(psmouse, rate); + if (elantech_write_reg(psmouse, 0x07, etd->reg_07)) + psmouse_err(psmouse, "restoring reg_07 failed\n"); +} + +/* * Put the touchpad into absolute mode */ static int elantech_set_absolute_mode(struct psmouse *psmouse) @@ -1094,6 +1109,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons + * Asus TP500LN 0x381f17 10, 14, 0e clickpad + * Asus X750JN 0x381f17 10, 14, 0e clickpad * Asus UX31 0x361f00 20, 15, 0e clickpad * Asus UX32VD 0x361f02 00, 15, 0e clickpad * Avatar AVIU-145A2 0x361f00 ? clickpad @@ -1635,6 +1652,11 @@ int elantech_init(struct psmouse *psmouse) goto init_fail; } + if (etd->fw_version == 0x381f17) { + etd->original_set_rate = psmouse->set_rate; + psmouse->set_rate = elantech_set_rate_restore_reg_07; + } + if (elantech_set_input_params(psmouse)) { psmouse_err(psmouse, "failed to query touchpad range.\n"); goto init_fail; diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h index 6f3afec02f03..f965d1569cc3 100644 --- a/drivers/input/mouse/elantech.h +++ b/drivers/input/mouse/elantech.h @@ -142,6 +142,7 @@ struct elantech_data { struct finger_pos mt[ETP_MAX_FINGERS]; unsigned char parity[256]; int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param); + void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate); }; #ifdef CONFIG_MOUSE_PS2_ELANTECH diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 08981be7baa1..a9f2266049af 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -920,11 +920,10 @@ static int crypt_convert(struct crypt_config *cc, switch (r) { /* async */ + case -EINPROGRESS: case -EBUSY: wait_for_completion(&ctx->restart); reinit_completion(&ctx->restart); - /* fall through*/ - case -EINPROGRESS: ctx->req = NULL; ctx->cc_sector++; continue; @@ -1315,10 +1314,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); struct crypt_config *cc = io->cc; - if (error == -EINPROGRESS) { - complete(&ctx->restart); + if (error == -EINPROGRESS) return; - } if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); @@ -1329,12 +1326,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); if (!atomic_dec_and_test(&ctx->cc_pending)) - return; + goto done; if (bio_data_dir(io->base_bio) == READ) kcryptd_crypt_read_done(io); else kcryptd_crypt_write_io_submit(io, 1); +done: + if (!completion_done(&ctx->restart)) + complete(&ctx->restart); } static void kcryptd_crypt(struct work_struct *work) diff --git a/drivers/md/md.c b/drivers/md/md.c index 
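The elantech change above is a compact example of wrapping a driver callback: the stock set_rate pointer is saved at init time and replaced by a wrapper that chains to it and then re-applies a register the operation clobbers. The same shape with invented foo_* names (hypothetical, for illustration only):

#include <linux/types.h>

struct foo_dev;
int foo_write_reg(struct foo_dev *dev, u8 reg, u8 val);	/* hypothetical */

struct foo_dev {
	void (*orig_set_rate)(struct foo_dev *dev, unsigned int rate);
	void (*set_rate)(struct foo_dev *dev, unsigned int rate);
	u8 saved_reg_07;
};

/* Chain to the original callback, then restore the register it loses. */
static void foo_set_rate_restore(struct foo_dev *dev, unsigned int rate)
{
	dev->orig_set_rate(dev, rate);
	foo_write_reg(dev, 0x07, dev->saved_reg_07);
}

/* Installed once at init time:
 *	dev->orig_set_rate = dev->set_rate;
 *	dev->set_rate = foo_set_rate_restore;
 */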
709755fb6d7b..193feb92c320 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -248,6 +248,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio) const int rw = bio_data_dir(bio); struct mddev *mddev = q->queuedata; unsigned int sectors; + int cpu; if (mddev == NULL || mddev->pers == NULL || !mddev->ready) { @@ -283,7 +284,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio) sectors = bio_sectors(bio); mddev->pers->make_request(mddev, bio); - generic_start_io_acct(rw, sectors, &mddev->gendisk->part0); + cpu = part_stat_lock(); + part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); + part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); + part_stat_unlock(); if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) wake_up(&mddev->sb_wait); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index ba6b85de96d2..d5c12e5b6125 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -319,7 +319,7 @@ static struct strip_zone *find_zone(struct r0conf *conf, /* * remaps the bio to the target device. we separate two flows. - * power 2 flow and a general flow for the sake of perfromance + * power 2 flow and a general flow for the sake of performance */ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone, sector_t sector, sector_t *sector_offset) @@ -537,6 +537,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) split = bio; } + sector = bio->bi_iter.bi_sector; zone = find_zone(mddev->private, &sector); tmp_dev = map_sector(mddev, zone, sector, &sector); split->bi_bdev = tmp_dev->bdev; diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c index 77c78de4f5bf..7020659f23c2 100644 --- a/drivers/media/rc/img-ir/img-ir-core.c +++ b/drivers/media/rc/img-ir/img-ir-core.c @@ -146,7 +146,7 @@ static int img_ir_remove(struct platform_device *pdev) { struct img_ir_priv *priv = platform_get_drvdata(pdev); - free_irq(priv->irq, img_ir_isr); + free_irq(priv->irq, priv); img_ir_remove_hw(priv); img_ir_remove_raw(priv); diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c index a47629108c1b..5bd940fd7a50 100644 --- a/drivers/media/usb/stk1160/stk1160-v4l.c +++ b/drivers/media/usb/stk1160/stk1160-v4l.c @@ -244,6 +244,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev) if (mutex_lock_interruptible(&dev->v4l_lock)) return -ERESTARTSYS; + /* + * Once URBs are cancelled, the URB complete handler + * won't be running. This is required to safely release the + * current buffer (dev->isoc_ctl.buf). 
+ */ stk1160_cancel_isoc(dev); /* @@ -624,8 +629,16 @@ void stk1160_clear_queue(struct stk1160 *dev) stk1160_info("buffer [%p/%d] aborted\n", buf, buf->vb.v4l2_buf.index); } - /* It's important to clear current buffer */ - dev->isoc_ctl.buf = NULL; + + /* It's important to release the current buffer */ + if (dev->isoc_ctl.buf) { + buf = dev->isoc_ctl.buf; + dev->isoc_ctl.buf = NULL; + + vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); + stk1160_info("buffer [%p/%d] aborted\n", + buf, buf->vb.v4l2_buf.index); + } spin_unlock_irqrestore(&dev->buf_lock, flags); } diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index fc145d202c46..922a750640e8 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c @@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error) if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) { if (msb->data_dir == READ) { - for (cnt = 0; cnt < msb->current_seg; cnt++) + for (cnt = 0; cnt < msb->current_seg; cnt++) { t_len += msb->req_sg[cnt].length / msb->page_size; @@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error) t_len += msb->current_page - 1; t_len *= msb->page_size; + } } } else t_len = blk_rq_bytes(msb->block_req); diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 2a87f69be53d..1aed3b7b8d9b 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -128,7 +128,7 @@ static int mfd_add_device(struct device *parent, int id, int platform_id; int r; - if (id < 0) + if (id == PLATFORM_DEVID_AUTO) platform_id = id; else platform_id = id + cell->id; diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 15cb8b7ffc34..9bad746425cb 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -908,7 +908,9 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, return PTR_ERR(host->clk_mmc); } - host->reset = devm_reset_control_get(&pdev->dev, "ahb"); + host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb"); + if (PTR_ERR(host->reset) == -EPROBE_DEFER) + return PTR_ERR(host->reset); ret = clk_prepare_enable(host->clk_ahb); if (ret) { diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c index 9d2e16f3150a..b5e154856994 100644 --- a/drivers/mtd/ubi/attach.c +++ b/drivers/mtd/ubi/attach.c @@ -410,7 +410,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, second_is_newer = !second_is_newer; } else { dbg_bld("PEB %d CRC is OK", pnum); - bitflips = !!err; + bitflips |= !!err; } mutex_unlock(&ubi->buf_mutex); diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 3410ea8109f8..d347ecf1d27c 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -454,7 +454,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd, /* Validate the request */ err = -EINVAL; if (req.lnum < 0 || req.lnum >= vol->reserved_pebs || - req.bytes < 0 || req.lnum >= vol->usable_leb_size) + req.bytes < 0 || req.bytes > vol->usable_leb_size) break; err = get_exclusive(ubi, desc); diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index a40020cf0923..8c74076efb9e 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -1362,7 +1362,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai) * during re-size. 
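The sunxi-mmc hunk above is also worth a note: devm_reset_control_get_optional() makes the reset line non-mandatory, but a provider that has not probed yet must still propagate -EPROBE_DEFER rather than be treated as absent. Roughly, as a sketch with an invented foo_ prefix (the "ahb" id comes from the hunk above):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/reset.h>

/* Grab an optional reset control: missing is fine, "not ready yet"
 * must still defer the probe so it is retried once the provider appears. */
static int foo_get_optional_reset(struct device *dev,
				  struct reset_control **rst)
{
	*rst = devm_reset_control_get_optional(dev, "ahb");
	if (PTR_ERR(*rst) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	return 0;
}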
*/ ubi_move_aeb_to_list(av, aeb, &ai->erase); - vol->eba_tbl[aeb->lnum] = aeb->pnum; + else + vol->eba_tbl[aeb->lnum] = aeb->pnum; } } diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 834f6fe1f5fa..4e07f81094ae 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1005,7 +1005,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, int shutdown) { int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; - int vol_id = -1, uninitialized_var(lnum); + int vol_id = -1, lnum = -1; #ifdef CONFIG_MTD_UBI_FASTMAP int anchor = wrk->anchor; #endif diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 83140cbb5f01..82f9c554021d 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int cleaned_count); @@ -3548,8 +3553,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) msleep(1); /* e1000_down has a dependency on max_frame_size */ hw->max_frame_size = max_frame; - if (netif_running(netdev)) + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; e1000_down(adapter); + } /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us to allocate from the next diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index af829c578400..7ace07dad6a3 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev) np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (!np) { dev_err(&pdev->dev, "missing phy-handle\n"); - return -EINVAL; + err = -EINVAL; + goto err_netdev; } of_property_read_u32(np, "reg", &pep->phy_addr); pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); @@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) pep->smi_bus = mdiobus_alloc(); if (pep->smi_bus == NULL) { err = -ENOMEM; - goto err_base; + goto err_netdev; } pep->smi_bus->priv = pep; pep->smi_bus->name = "pxa168_eth smi"; @@ -1551,13 +1552,10 @@ err_mdiobus: mdiobus_unregister(pep->smi_bus); err_free_mdio: mdiobus_free(pep->smi_bus); -err_base: - iounmap(pep->base); err_netdev: free_netdev(dev); err_clk: - clk_disable(clk); - clk_put(clk); + clk_disable_unprepare(clk); return err; } @@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev) if (pep->phy) phy_disconnect(pep->phy); if (pep->clk) { - clk_disable(pep->clk); - clk_put(pep->clk); - pep->clk = NULL; + clk_disable_unprepare(pep->clk); } - iounmap(pep->base); - pep->base = NULL; mdiobus_unregister(pep->smi_bus); mdiobus_free(pep->smi_bus); unregister_netdev(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 90e0f045a6bc..33637eb16ca0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -983,20 +983,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc) struct mlx4_en_priv *priv = netdev_priv(dev); /* check if requested function is supported by the device */ - if ((hfunc == ETH_RSS_HASH_TOP && - !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) || - (hfunc == ETH_RSS_HASH_XOR && - !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))) - return -EINVAL; + if (hfunc == ETH_RSS_HASH_TOP) { + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) + return -EINVAL; + if (!(dev->features & NETIF_F_RXHASH)) + en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); + return 0; + } else if (hfunc == ETH_RSS_HASH_XOR) { + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)) + return -EINVAL; + if (dev->features & NETIF_F_RXHASH) + en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); + return 0; + } - priv->rss_hash_fn = hfunc; - if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH)) - en_warn(priv, - "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); - if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH)) - en_warn(priv, - "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); - return 0; + return -EINVAL; } static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, @@ -1070,6 +1071,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, priv->prof->rss_rings = rss_rings; if (key) memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE); + if (hfunc != ETH_RSS_HASH_NO_CHANGE) + priv->rss_hash_fn = hfunc; if (port_up) { err = mlx4_en_start_port(dev); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index af034dba9bd6..9d15566521a7 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1716,6 +1716,7 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) { /* note: a 0-length skb is used as an error indication */ if (skb->len > 0) { + skb_checksum_complete_unset(skb); #ifdef CONFIG_PPP_MULTILINK /* XXX do channel-level decompression here */ if (PPP_PROTO(skb) == PPP_MP) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index e06bafee37f9..5034660bf411 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/ + {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ @@ -377,6 +378,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/ {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ + {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */ {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, 
/*Planex -Abocom*/ {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/ {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/ diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c index 7f1669cdea09..779dc2b2ca75 100644 --- a/drivers/net/wireless/ti/wl18xx/debugfs.c +++ b/drivers/net/wireless/ti/wl18xx/debugfs.c @@ -136,7 +136,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50); WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate, AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE); diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h index 0f2cfb0d2a9e..bf14676e6515 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.h +++ b/drivers/net/wireless/ti/wlcore/debugfs.h @@ -26,8 +26,8 @@ #include "wlcore.h" -int wl1271_format_buffer(char __user *userbuf, size_t count, - loff_t *ppos, char *fmt, ...); +__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count, + loff_t *ppos, char *fmt, ...); int wl1271_debugfs_init(struct wl1271 *wl); void wl1271_debugfs_exit(struct wl1271 *wl); diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c index 01ba865863ee..b5eb3754578c 100644 --- a/drivers/nfc/st21nfcb/i2c.c +++ b/drivers/nfc/st21nfcb/i2c.c @@ -109,7 +109,7 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb) return phy->ndlc->hard_fault; r = i2c_master_send(client, skb->data, skb->len); - if (r == -EREMOTEIO) { /* Retry, chip was in standby */ + if (r < 0) { /* Retry, chip was in standby */ usleep_range(1000, 4000); r = i2c_master_send(client, skb->data, skb->len); } @@ -148,7 +148,7 @@ static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy, struct i2c_client *client = phy->i2c_dev; r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE); - if (r == -EREMOTEIO) { /* Retry, chip was in standby */ + if (r < 0) { /* Retry, chip was in standby */ usleep_range(1000, 4000); r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE); } diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index 15c0fab2bfa1..bceb30b539f3 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c @@ -1026,9 +1026,9 @@ static int compal_probe(struct platform_device *pdev) if (err) return err; - hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, - "compal", data, - compal_hwmon_groups); + hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev, + "compal", data, + compal_hwmon_groups); if (IS_ERR(hwmon_dev)) { err = PTR_ERR(hwmon_dev); goto remove; @@ -1036,7 +1036,9 @@ static int compal_probe(struct platform_device *pdev) /* Power supply */ initialize_power_supply_data(data); - power_supply_register(&compal_device->dev, &data->psy); + err = power_supply_register(&compal_device->dev, &data->psy); + if (err < 0) + goto remove; platform_set_drvdata(pdev, data); diff --git a/drivers/power/ipaq_micro_battery.c b/drivers/power/ipaq_micro_battery.c index 9d694605cdb7..96b15e003f3f 100644 --- a/drivers/power/ipaq_micro_battery.c +++ b/drivers/power/ipaq_micro_battery.c @@ -226,6 +226,7 @@ static struct power_supply micro_ac_power = { static int micro_batt_probe(struct platform_device *pdev) { 
struct micro_battery *mb; + int ret; mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL); if (!mb) @@ -233,14 +234,30 @@ static int micro_batt_probe(struct platform_device *pdev) mb->micro = dev_get_drvdata(pdev->dev.parent); mb->wq = create_singlethread_workqueue("ipaq-battery-wq"); + if (!mb->wq) + return -ENOMEM; + INIT_DELAYED_WORK(&mb->update, micro_battery_work); platform_set_drvdata(pdev, mb); queue_delayed_work(mb->wq, &mb->update, 1); - power_supply_register(&pdev->dev, µ_batt_power); - power_supply_register(&pdev->dev, µ_ac_power); + + ret = power_supply_register(&pdev->dev, µ_batt_power); + if (ret < 0) + goto batt_err; + + ret = power_supply_register(&pdev->dev, µ_ac_power); + if (ret < 0) + goto ac_err; dev_info(&pdev->dev, "iPAQ micro battery driver\n"); return 0; + +ac_err: + power_supply_unregister(µ_ac_power); +batt_err: + cancel_delayed_work_sync(&mb->update); + destroy_workqueue(mb->wq); + return ret; } static int micro_batt_remove(struct platform_device *pdev) @@ -251,6 +268,7 @@ static int micro_batt_remove(struct platform_device *pdev) power_supply_unregister(µ_ac_power); power_supply_unregister(µ_batt_power); cancel_delayed_work_sync(&mb->update); + destroy_workqueue(mb->wq); return 0; } diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c index 21fc233c7d61..176dab2e4c16 100644 --- a/drivers/power/lp8788-charger.c +++ b/drivers/power/lp8788-charger.c @@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev, pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop); pchg->battery.get_property = lp8788_battery_get_property; - if (power_supply_register(&pdev->dev, &pchg->battery)) + if (power_supply_register(&pdev->dev, &pchg->battery)) { + power_supply_unregister(&pchg->charger); return -EPERM; + } return 0; } diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c index 7ef445a6cfa6..cf907609ec49 100644 --- a/drivers/power/twl4030_madc_battery.c +++ b/drivers/power/twl4030_madc_battery.c @@ -192,6 +192,7 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev) { struct twl4030_madc_battery *twl4030_madc_bat; struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data; + int ret = 0; twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL); if (!twl4030_madc_bat) @@ -216,9 +217,11 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev) twl4030_madc_bat->pdata = pdata; platform_set_drvdata(pdev, twl4030_madc_bat); - power_supply_register(&pdev->dev, &twl4030_madc_bat->psy); + ret = power_supply_register(&pdev->dev, &twl4030_madc_bat->psy); + if (ret < 0) + kfree(twl4030_madc_bat); - return 0; + return ret; } static int twl4030_madc_battery_remove(struct platform_device *pdev) diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 2d5ab6d969ec..454536c49315 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) static int mvs_task_prep_ata(struct mvs_info *mvi, struct mvs_task_exec_info *tei) { - struct sas_ha_struct *sha = mvi->sas; struct sas_task *task = tei->task; struct domain_device *dev = task->dev; struct mvs_device *mvi_dev = dev->lldd_dev; struct mvs_cmd_hdr *hdr = tei->hdr; struct asd_sas_port *sas_port = dev->port; - struct sas_phy *sphy = dev->phy; - struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number]; struct mvs_slot_info *slot; void *buf_prd; u32 tag = tei->tag, hdr_tag; @@ -468,7 +465,7 @@ 
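The ipaq_micro_battery probe above now checks create_singlethread_workqueue() and both power_supply_register() calls, unwinding the earlier steps in reverse order through goto labels when a later one fails. The shape of that error handling, reduced to plain C with stand-in resources (malloc() here is only a placeholder for the real acquisitions):

#include <stdio.h>
#include <stdlib.h>

static int demo_probe(void)
{
    char *wq, *batt, *ac;

    wq = malloc(32);
    if (!wq)
        return -1;

    batt = malloc(32);
    if (!batt)
        goto err_batt;

    ac = malloc(32);
    if (!ac)
        goto err_ac;

    printf("probe ok\n");
    free(ac);               /* released here only to keep the demo leak-free */
    free(batt);
    free(wq);
    return 0;

err_ac:
    free(batt);             /* undo completed steps in reverse order */
err_batt:
    free(wq);
    return -1;
}

int main(void)
{
    return demo_probe() ? 1 : 0;
}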
static int mvs_task_prep_ata(struct mvs_info *mvi, slot->tx = mvi->tx_prod; del_q = TXQ_MODE_I | tag | (TXQ_CMD_STP << TXQ_CMD_SHIFT) | - (MVS_PHY_ID << TXQ_PHY_SHIFT) | + ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) | (mvi_dev->taskfileset << TXQ_SRS_SHIFT); mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 05ea0d49a3a3..ad35ccdb92a2 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3100,6 +3100,7 @@ static void scsi_disk_release(struct device *dev) ida_remove(&sd_index_ida, sdkp->index); spin_unlock(&sd_index_lock); + blk_integrity_unregister(disk); disk->private_data = NULL; put_disk(disk); put_device(&sdkp->device->sdev_gendev); diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 14c7d42a11c2..5c06d292b94c 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -77,7 +77,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp) disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; - if (!sdkp) + if (!sdkp->ATO) return; if (type == SD_DIF_TYPE3_PROTECTION) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 4cff0ddc2c25..3483b1d21681 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -741,21 +741,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, if (bounce_sgl[j].length == PAGE_SIZE) { /* full..move to next entry */ sg_kunmap_atomic(bounce_addr); + bounce_addr = 0; j++; + } - /* if we need to use another bounce buffer */ - if (srclen || i != orig_sgl_count - 1) - bounce_addr = sg_kmap_atomic(bounce_sgl,j); + /* if we need to use another bounce buffer */ + if (srclen && bounce_addr == 0) + bounce_addr = sg_kmap_atomic(bounce_sgl, j); - } else if (srclen == 0 && i == orig_sgl_count - 1) { - /* unmap the last bounce that is < PAGE_SIZE */ - sg_kunmap_atomic(bounce_addr); - } } sg_kunmap_atomic(src_addr - orig_sgl[i].offset); } + if (bounce_addr) + sg_kunmap_atomic(bounce_addr); + local_irq_restore(flags); return total_copied; diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index fe1b7699fab6..308c2d39b7ce 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -371,8 +371,6 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx, if (spi_imx->dma_is_inited) { dma = readl(spi_imx->base + MX51_ECSPI_DMA); - spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2; - spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2; spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2; rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET; tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET; @@ -869,6 +867,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx, master->max_dma_len = MAX_SDMA_BD_BYTES; spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; + spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2; + spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2; spi_imx->dma_is_inited = 1; return 0; diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 6941e04afb8c..5c854355f621 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -249,7 +249,10 @@ static int spidev_message(struct spidev_data *spidev, k_tmp->len = u_tmp->len; total += k_tmp->len; - if (total > bufsiz) { + /* Check total length of transfers. Also check each + * transfer length to avoid arithmetic overflow. 
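The spidev change that begins above (its check lands just below) validates each transfer's len against bufsiz in addition to the running total, so one huge length cannot wrap the accumulator before the total check triggers. A small sketch of the same validation, with a made-up transfer structure and limit:

#include <stdio.h>
#include <stddef.h>

#define BUF_LIMIT 4096u

struct xfer { unsigned int len; };

/* Return 0 if all transfers fit, -1 on any oversized or overflowing input. */
static int check_total(const struct xfer *x, size_t n)
{
    unsigned int total = 0;

    for (size_t i = 0; i < n; i++) {
        total += x[i].len;
        /* test each length as well as the sum, so overflow of 'total'
         * cannot hide an oversized transfer */
        if (total > BUF_LIMIT || x[i].len > BUF_LIMIT)
            return -1;
    }
    return 0;
}

int main(void)
{
    struct xfer ok[]  = { { 1024 }, { 2048 } };
    struct xfer bad[] = { { 16 }, { 0xffffffffu } };

    printf("%d %d\n", check_total(ok, 2), check_total(bad, 2));  /* 0 -1 */
    return 0;
}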
+ */ + if (total > bufsiz || k_tmp->len > bufsiz) { status = -EMSGSIZE; goto done; } diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c index 7bdb62bf6b40..f83e00c78051 100644 --- a/drivers/staging/android/sync.c +++ b/drivers/staging/android/sync.c @@ -114,7 +114,7 @@ void sync_timeline_signal(struct sync_timeline *obj) list_for_each_entry_safe(pt, next, &obj->active_list_head, active_list) { if (fence_is_signaled_locked(&pt->base)) - list_del(&pt->active_list); + list_del_init(&pt->active_list); } spin_unlock_irqrestore(&obj->child_list_lock, flags); diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c index 98325b7b4462..47de68b6334f 100644 --- a/drivers/staging/panel/panel.c +++ b/drivers/staging/panel/panel.c @@ -314,11 +314,11 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES]; * LCD types */ #define LCD_TYPE_NONE 0 -#define LCD_TYPE_OLD 1 -#define LCD_TYPE_KS0074 2 -#define LCD_TYPE_HANTRONIX 3 -#define LCD_TYPE_NEXCOM 4 -#define LCD_TYPE_CUSTOM 5 +#define LCD_TYPE_CUSTOM 1 +#define LCD_TYPE_OLD 2 +#define LCD_TYPE_KS0074 3 +#define LCD_TYPE_HANTRONIX 4 +#define LCD_TYPE_NEXCOM 5 /* * keypad types @@ -481,7 +481,7 @@ MODULE_PARM_DESC(keypad_type, static int lcd_type = NOT_SET; module_param(lcd_type, int, 0000); MODULE_PARM_DESC(lcd_type, - "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in"); + "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom"); static int lcd_height = NOT_SET; module_param(lcd_height, int, 0000); diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c index b5b0155961f2..baaab3fcae8f 100644 --- a/drivers/staging/vt6655/rxtx.c +++ b/drivers/staging/vt6655/rxtx.c @@ -1309,10 +1309,18 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx, priv->hw->conf.chandef.chan->hw_value); } - if (current_rate > RATE_11M) - pkt_type = (u8)priv->byPacketType; - else + if (current_rate > RATE_11M) { + if (info->band == IEEE80211_BAND_5GHZ) { + pkt_type = PK_TYPE_11A; + } else { + if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) + pkt_type = PK_TYPE_11GB; + else + pkt_type = PK_TYPE_11GA; + } + } else { pkt_type = PK_TYPE_11B; + } /*Set fifo controls */ if (pkt_type == PK_TYPE_11A) diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index d836de200a03..15cfcd6b9343 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -264,40 +264,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, struct se_device *se_dev = cmd->se_dev; struct fd_dev *dev = FD_DEV(se_dev); struct file *prot_fd = dev->fd_prot_file; - struct scatterlist *sg; loff_t pos = (cmd->t_task_lba * se_dev->prot_length); unsigned char *buf; - u32 prot_size, len, size; - int rc, ret = 1, i; + u32 prot_size; + int rc, ret = 1; prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) * se_dev->prot_length; if (!is_write) { - fd_prot->prot_buf = vzalloc(prot_size); + fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL); if (!fd_prot->prot_buf) { pr_err("Unable to allocate fd_prot->prot_buf\n"); return -ENOMEM; } buf = fd_prot->prot_buf; - fd_prot->prot_sg_nents = cmd->t_prot_nents; - fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) * - fd_prot->prot_sg_nents, GFP_KERNEL); + fd_prot->prot_sg_nents = 1; + fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist), + GFP_KERNEL); if (!fd_prot->prot_sg) { pr_err("Unable to allocate fd_prot->prot_sg\n"); - 
vfree(fd_prot->prot_buf); + kfree(fd_prot->prot_buf); return -ENOMEM; } - size = prot_size; - - for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) { - - len = min_t(u32, PAGE_SIZE, size); - sg_set_buf(sg, buf, len); - size -= len; - buf += len; - } + sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents); + sg_set_buf(fd_prot->prot_sg, buf, prot_size); } if (is_write) { @@ -318,7 +310,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, if (is_write || ret < 0) { kfree(fd_prot->prot_sg); - vfree(fd_prot->prot_buf); + kfree(fd_prot->prot_buf); } return ret; @@ -544,6 +536,56 @@ fd_execute_write_same(struct se_cmd *cmd) return 0; } +static int +fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb, + void *buf, size_t bufsize) +{ + struct fd_dev *fd_dev = FD_DEV(se_dev); + struct file *prot_fd = fd_dev->fd_prot_file; + sector_t prot_length, prot; + loff_t pos = lba * se_dev->prot_length; + + if (!prot_fd) { + pr_err("Unable to locate fd_dev->fd_prot_file\n"); + return -ENODEV; + } + + prot_length = nolb * se_dev->prot_length; + + for (prot = 0; prot < prot_length;) { + sector_t len = min_t(sector_t, bufsize, prot_length - prot); + ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot); + + if (ret != len) { + pr_err("vfs_write to prot file failed: %zd\n", ret); + return ret < 0 ? ret : -ENODEV; + } + prot += ret; + } + + return 0; +} + +static int +fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) +{ + void *buf; + int rc; + + buf = (void *)__get_free_page(GFP_KERNEL); + if (!buf) { + pr_err("Unable to allocate FILEIO prot buf\n"); + return -ENOMEM; + } + memset(buf, 0xff, PAGE_SIZE); + + rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE); + + free_page((unsigned long)buf); + + return rc; +} + static sense_reason_t fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) { @@ -551,6 +593,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) struct inode *inode = file->f_mapping->host; int ret; + if (cmd->se_dev->dev_attrib.pi_prot_type) { + ret = fd_do_prot_unmap(cmd, lba, nolb); + if (ret) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + if (S_ISBLK(inode->i_mode)) { /* The backend is block device, use discard */ struct block_device *bdev = inode->i_bdev; @@ -653,11 +701,11 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, 0, fd_prot.prot_sg, 0); if (rc) { kfree(fd_prot.prot_sg); - vfree(fd_prot.prot_buf); + kfree(fd_prot.prot_buf); return rc; } kfree(fd_prot.prot_sg); - vfree(fd_prot.prot_buf); + kfree(fd_prot.prot_buf); } } else { memset(&fd_prot, 0, sizeof(struct fd_prot)); @@ -673,7 +721,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, 0, fd_prot.prot_sg, 0); if (rc) { kfree(fd_prot.prot_sg); - vfree(fd_prot.prot_buf); + kfree(fd_prot.prot_buf); return rc; } } @@ -709,7 +757,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, if (ret < 0) { kfree(fd_prot.prot_sg); - vfree(fd_prot.prot_buf); + kfree(fd_prot.prot_buf); return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } @@ -873,48 +921,28 @@ static int fd_init_prot(struct se_device *dev) static int fd_format_prot(struct se_device *dev) { - struct fd_dev *fd_dev = FD_DEV(dev); - struct file *prot_fd = fd_dev->fd_prot_file; - sector_t prot_length, prot; unsigned char *buf; - loff_t pos = 0; int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; - int rc, ret = 0, size, len; + int ret; if (!dev->dev_attrib.pi_prot_type) { 
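fd_do_prot_fill() above factors out "write a repeated byte pattern across a range in bounded chunks" so both FILEIO protection formatting and the new UNMAP path can share the loop. A user-space sketch of that loop using POSIX pwrite(); the file name and chunk size are arbitrary:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>

/* Fill [off, off + len) of fd with 0xff, chunk_size bytes at a time. */
static int fill_region(int fd, off_t off, size_t len, size_t chunk_size)
{
    unsigned char *buf = malloc(chunk_size);
    size_t done = 0;

    if (!buf)
        return -1;
    memset(buf, 0xff, chunk_size);

    while (done < len) {
        size_t n = len - done < chunk_size ? len - done : chunk_size;
        ssize_t ret = pwrite(fd, buf, n, off + (off_t)done);

        if (ret != (ssize_t)n) {        /* short write or error */
            free(buf);
            return -1;
        }
        done += n;
    }
    free(buf);
    return 0;
}

int main(void)
{
    int fd = open("prot.img", O_RDWR | O_CREAT, 0600);

    if (fd < 0 || fill_region(fd, 0, 64 * 1024, 4096))
        return 1;
    close(fd);
    return 0;
}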
pr_err("Unable to format_prot while pi_prot_type == 0\n"); return -ENODEV; } - if (!prot_fd) { - pr_err("Unable to locate fd_dev->fd_prot_file\n"); - return -ENODEV; - } buf = vzalloc(unit_size); if (!buf) { pr_err("Unable to allocate FILEIO prot buf\n"); return -ENOMEM; } - prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length; - size = prot_length; pr_debug("Using FILEIO prot_length: %llu\n", - (unsigned long long)prot_length); + (unsigned long long)(dev->transport->get_blocks(dev) + 1) * + dev->prot_length); memset(buf, 0xff, unit_size); - for (prot = 0; prot < prot_length; prot += unit_size) { - len = min(unit_size, size); - rc = kernel_write(prot_fd, buf, len, pos); - if (rc != len) { - pr_err("vfs_write to prot file failed: %d\n", rc); - ret = -ENODEV; - goto out; - } - pos += len; - size -= len; - } - -out: + ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1, + buf, unit_size); vfree(buf); return ret; } diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 36b471389169..48de05366645 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -299,7 +299,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o return 0; } -static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd) +static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) { unsigned char *buf, *addr; struct scatterlist *sg; @@ -363,7 +363,7 @@ sbc_execute_rw(struct se_cmd *cmd) cmd->data_direction); } -static sense_reason_t compare_and_write_post(struct se_cmd *cmd) +static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) { struct se_device *dev = cmd->se_dev; @@ -386,7 +386,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd) return TCM_NO_SENSE; } -static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) +static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) { struct se_device *dev = cmd->se_dev; struct scatterlist *write_sg = NULL, *sg; @@ -401,11 +401,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) /* * Handle early failure in transport_generic_request_failure(), - * which will not have taken ->caw_mutex yet.. + * which will not have taken ->caw_sem yet.. */ - if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) + if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg)) return TCM_NO_SENSE; /* + * Handle special case for zero-length COMPARE_AND_WRITE + */ + if (!cmd->data_length) + goto out; + /* * Immediately exit + release dev->caw_sem if command has already * been failed with a non-zero SCSI status. */ diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index ac3cbabdbdf0..f786de0290db 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1615,11 +1615,11 @@ void transport_generic_request_failure(struct se_cmd *cmd, transport_complete_task_attr(cmd); /* * Handle special case for COMPARE_AND_WRITE failure, where the - * callback is expected to drop the per device ->caw_mutex. + * callback is expected to drop the per device ->caw_sem. 
*/ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && cmd->transport_complete_callback) - cmd->transport_complete_callback(cmd); + cmd->transport_complete_callback(cmd, false); switch (sense_reason) { case TCM_NON_EXISTENT_LUN: @@ -1975,8 +1975,12 @@ static void target_complete_ok_work(struct work_struct *work) if (cmd->transport_complete_callback) { sense_reason_t rc; - rc = cmd->transport_complete_callback(cmd); + rc = cmd->transport_complete_callback(cmd, true); if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { + if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && + !cmd->data_length) + goto queue_rsp; + return; } else if (rc) { ret = transport_send_check_condition_and_sense(cmd, @@ -1990,6 +1994,7 @@ static void target_complete_ok_work(struct work_struct *work) } } +queue_rsp: switch (cmd->data_direction) { case DMA_FROM_DEVICE: spin_lock(&cmd->se_lun->lun_sep_lock); @@ -2094,6 +2099,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd) static inline void transport_free_pages(struct se_cmd *cmd) { if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { + /* + * Release special case READ buffer payload required for + * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE + */ + if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { + transport_free_sgl(cmd->t_bidi_data_sg, + cmd->t_bidi_data_nents); + cmd->t_bidi_data_sg = NULL; + cmd->t_bidi_data_nents = 0; + } transport_reset_sgl_orig(cmd); return; } @@ -2246,6 +2261,7 @@ sense_reason_t transport_generic_new_cmd(struct se_cmd *cmd) { int ret = 0; + bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); /* * Determine is the TCM fabric module has already allocated physical @@ -2254,7 +2270,6 @@ transport_generic_new_cmd(struct se_cmd *cmd) */ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && cmd->data_length) { - bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); if ((cmd->se_cmd_flags & SCF_BIDI) || (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { @@ -2285,6 +2300,20 @@ transport_generic_new_cmd(struct se_cmd *cmd) cmd->data_length, zero_flag); if (ret < 0) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && + cmd->data_length) { + /* + * Special case for COMPARE_AND_WRITE with fabrics + * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 
+ */ + u32 caw_length = cmd->t_task_nolb * + cmd->se_dev->dev_attrib.block_size; + + ret = target_alloc_sgl(&cmd->t_bidi_data_sg, + &cmd->t_bidi_data_nents, + caw_length, zero_flag); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } /* * If this command is not a write we can execute it right here, diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index a051a7a2b1bd..a81f9dd7ee97 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -245,7 +245,7 @@ static void wdm_int_callback(struct urb *urb) case USB_CDC_NOTIFY_RESPONSE_AVAILABLE: dev_dbg(&desc->intf->dev, "NOTIFY_RESPONSE_AVAILABLE received: index %d len %d", - dr->wIndex, dr->wLength); + le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength)); break; case USB_CDC_NOTIFY_NETWORK_CONNECTION: @@ -262,7 +262,9 @@ static void wdm_int_callback(struct urb *urb) clear_bit(WDM_POLL_RUNNING, &desc->flags); dev_err(&desc->intf->dev, "unknown notification %d received: index %d len %d\n", - dr->bNotificationType, dr->wIndex, dr->wLength); + dr->bNotificationType, + le16_to_cpu(dr->wIndex), + le16_to_cpu(dr->wLength)); goto exit; } @@ -408,7 +410,7 @@ static ssize_t wdm_write USB_RECIP_INTERFACE); req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND; req->wValue = 0; - req->wIndex = desc->inum; + req->wIndex = desc->inum; /* already converted */ req->wLength = cpu_to_le16(count); set_bit(WDM_IN_USE, &desc->flags); desc->outbuf = buf; @@ -422,7 +424,7 @@ static ssize_t wdm_write rv = usb_translate_errors(rv); } else { dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d", - req->wIndex); + le16_to_cpu(req->wIndex)); } out: usb_autopm_put_interface(desc->intf); @@ -820,7 +822,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE); desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE; desc->irq->wValue = 0; - desc->irq->wIndex = desc->inum; + desc->irq->wIndex = desc->inum; /* already converted */ desc->irq->wLength = cpu_to_le16(desc->wMaxCommand); usb_fill_control_urb( diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index b4bfa3ac4b12..95409aacc076 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -3397,10 +3397,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) if (status) { dev_dbg(&port_dev->dev, "can't resume, status %d\n", status); } else { - /* drive resume for at least 20 msec */ + /* drive resume for USB_RESUME_TIMEOUT msec */ dev_dbg(&udev->dev, "usb %sresume\n", (PMSG_IS_AUTO(msg) ? "auto-" : "")); - msleep(25); + msleep(USB_RESUME_TIMEOUT); /* Virtual root hubs can trigger on GET_PORT_STATUS to * stop resume signaling. 
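The cdc-wdm hunks above wrap wIndex and wLength in le16_to_cpu() before logging them: USB control fields are little-endian on the wire, so printing them raw only happens to work on little-endian hosts. The user-space equivalent uses le16toh() from <endian.h>; the header layout below is a simplified stand-in for the real request block:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>

/* Stand-in for a control header with little-endian 16-bit fields. */
struct ctrl_hdr {
    uint16_t wIndex;
    uint16_t wLength;
};

int main(void)
{
    /* Bytes as they would arrive from the device: index 2, length 256. */
    const unsigned char raw[4] = { 0x02, 0x00, 0x00, 0x01 };
    struct ctrl_hdr h;

    memcpy(&h, raw, sizeof(h));
    /* Convert before using or printing; on big-endian hosts the raw
     * values would otherwise come out byte-swapped. */
    printf("index=%u len=%u\n",
           (unsigned int)le16toh(h.wIndex),
           (unsigned int)le16toh(h.wLength));
    return 0;
}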
Then finish the resume diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c index 90545980542f..6385c198c134 100644 --- a/drivers/usb/gadget/legacy/printer.c +++ b/drivers/usb/gadget/legacy/printer.c @@ -1031,6 +1031,15 @@ unknown: break; } /* host either stalls (value < 0) or reports success */ + if (value >= 0) { + req->length = value; + req->zero = value < wLength; + value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (value < 0) { + ERROR(dev, "%s:%d Error!\n", __func__, __LINE__); + req->status = 0; + } + } return value; } diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 38bfeedae1d0..002ac2944d24 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -792,12 +792,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) ehci->reset_done[i] == 0)) continue; - /* start 20 msec resume signaling from this port, - * and make hub_wq collect PORT_STAT_C_SUSPEND to - * stop that signaling. Use 5 ms extra for safety, - * like usb_port_resume() does. + /* start USB_RESUME_TIMEOUT msec resume signaling from + * this port, and make hub_wq collect + * PORT_STAT_C_SUSPEND to stop that signaling. */ - ehci->reset_done[i] = jiffies + msecs_to_jiffies(25); + ehci->reset_done[i] = jiffies + + msecs_to_jiffies(USB_RESUME_TIMEOUT); set_bit(i, &ehci->resuming_ports); ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); usb_hcd_start_port_resume(&hcd->self, i); diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 118edb7bdca2..d96d69f805ea 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -471,10 +471,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd) ehci_writel(ehci, temp, &ehci->regs->port_status [i]); } - /* msleep for 20ms only if code is trying to resume port */ + /* + * msleep for USB_RESUME_TIMEOUT ms only if code is trying to resume + * port + */ if (resume_needed) { spin_unlock_irq(&ehci->lock); - msleep(20); + msleep(USB_RESUME_TIMEOUT); spin_lock_irq(&ehci->lock); if (ehci->shutdown) goto shutdown; @@ -942,7 +945,7 @@ int ehci_hub_control( temp &= ~PORT_WAKE_BITS; ehci_writel(ehci, temp | PORT_RESUME, status_reg); ehci->reset_done[wIndex] = jiffies - + msecs_to_jiffies(20); + + msecs_to_jiffies(USB_RESUME_TIMEOUT); set_bit(wIndex, &ehci->resuming_ports); usb_hcd_start_port_resume(&hcd->self, wIndex); break; diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c index ecf02b2623e8..c4794e378ea7 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/host/fotg210-hcd.c @@ -1595,7 +1595,7 @@ static int fotg210_hub_control( /* resume signaling for 20 msec */ fotg210_writel(fotg210, temp | PORT_RESUME, status_reg); fotg210->reset_done[wIndex] = jiffies - + msecs_to_jiffies(20); + + msecs_to_jiffies(USB_RESUME_TIMEOUT); break; case USB_PORT_FEAT_C_SUSPEND: clear_bit(wIndex, &fotg210->port_c_suspend); diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c index 664d2aa1239c..c9eb18b9973c 100644 --- a/drivers/usb/host/fusbh200-hcd.c +++ b/drivers/usb/host/fusbh200-hcd.c @@ -1550,10 +1550,9 @@ static int fusbh200_hub_control ( if ((temp & PORT_PE) == 0) goto error; - /* resume signaling for 20 msec */ fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg); fusbh200->reset_done[wIndex] = jiffies - + msecs_to_jiffies(20); + + msecs_to_jiffies(USB_RESUME_TIMEOUT); break; case USB_PORT_FEAT_C_SUSPEND: clear_bit(wIndex, &fusbh200->port_c_suspend); diff --git a/drivers/usb/host/isp116x-hcd.c 
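The printer-gadget fix aside, the host-controller hunks above and below all replace per-driver 20/25/50 ms literals with the shared USB_RESUME_TIMEOUT constant, so every HCD drives resume signalling for the same duration. Stripped of the jiffies plumbing, the pattern is just a named constant plus deadline arithmetic (RESUME_TIMEOUT_MS below is a stand-in, not the kernel macro):

#include <stdio.h>

#define RESUME_TIMEOUT_MS 40    /* one shared constant instead of per-driver literals */

struct port {
    unsigned long reset_done;   /* deadline in ms, 0 = idle */
};

static void start_resume(struct port *p, unsigned long now_ms)
{
    p->reset_done = now_ms + RESUME_TIMEOUT_MS;
}

static int resume_finished(const struct port *p, unsigned long now_ms)
{
    return p->reset_done && now_ms >= p->reset_done;
}

int main(void)
{
    struct port p = { 0 };

    start_resume(&p, 1000);
    printf("%d %d\n", resume_finished(&p, 1020), resume_finished(&p, 1050)); /* 0 1 */
    return 0;
}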
b/drivers/usb/host/isp116x-hcd.c index 31c9c4d0fa0b..1613a1f69480 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c @@ -1487,7 +1487,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd) spin_unlock_irq(&isp116x->lock); hcd->state = HC_STATE_RESUMING; - msleep(20); + msleep(USB_RESUME_TIMEOUT); /* Go operational */ spin_lock_irq(&isp116x->lock); diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index 036924e640f5..3fc560c30d08 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -2500,11 +2500,12 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd) || oxu->reset_done[i] != 0) continue; - /* start 20 msec resume signaling from this port, - * and make hub_wq collect PORT_STAT_C_SUSPEND to + /* start USB_RESUME_TIMEOUT resume signaling from this + * port, and make hub_wq collect PORT_STAT_C_SUSPEND to * stop that signaling. */ - oxu->reset_done[i] = jiffies + msecs_to_jiffies(20); + oxu->reset_done[i] = jiffies + + msecs_to_jiffies(USB_RESUME_TIMEOUT); oxu_dbg(oxu, "port %d remote wakeup\n", i + 1); mod_timer(&hcd->rh_timer, oxu->reset_done[i]); } diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index c4bcfaedeec9..2dcf197d304c 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -2300,7 +2300,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd) rh->port &= ~USB_PORT_STAT_SUSPEND; rh->port |= USB_PORT_STAT_C_SUSPEND << 16; r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg); - msleep(50); + msleep(USB_RESUME_TIMEOUT); r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg); } diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index 25fb1da8d3d7..573355498193 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -1259,7 +1259,7 @@ sl811h_hub_control( sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1); mod_timer(&sl811->timer, jiffies - + msecs_to_jiffies(20)); + + msecs_to_jiffies(USB_RESUME_TIMEOUT)); break; case USB_PORT_FEAT_POWER: port_power(sl811, 0); diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c index 93e17b12fb33..98c66d88ebde 100644 --- a/drivers/usb/host/uhci-hub.c +++ b/drivers/usb/host/uhci-hub.c @@ -165,7 +165,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci) /* Port received a wakeup request */ set_bit(port, &uhci->resuming_ports); uhci->ports_timeout = jiffies + - msecs_to_jiffies(25); + msecs_to_jiffies(USB_RESUME_TIMEOUT); usb_hcd_start_port_resume( &uhci_to_hcd(uhci)->self, port); @@ -337,7 +337,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, uhci_finish_suspend(uhci, port, port_addr); /* USB v2.0 7.1.7.5 */ - uhci->ports_timeout = jiffies + msecs_to_jiffies(50); + uhci->ports_timeout = jiffies + + msecs_to_jiffies(USB_RESUME_TIMEOUT); break; case USB_PORT_FEAT_POWER: /* UHCI has no power switching */ diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 2a924d500d8a..7dd25cedef94 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1574,7 +1574,7 @@ static void handle_port_status(struct xhci_hcd *xhci, } else { xhci_dbg(xhci, "resume HS port %d\n", port_id); bus_state->resume_done[faked_port_index] = jiffies + - msecs_to_jiffies(20); + msecs_to_jiffies(USB_RESUME_TIMEOUT); set_bit(faked_port_index, &bus_state->resuming_ports); mod_timer(&hcd->rh_timer, bus_state->resume_done[faked_port_index]); diff --git a/drivers/usb/musb/musb_core.c 
b/drivers/usb/musb/musb_core.c index 34cce3e38c49..fa5185868c05 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -99,6 +99,7 @@ #include #include #include +#include #include "musb_core.h" @@ -562,7 +563,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, (USB_PORT_STAT_C_SUSPEND << 16) | MUSB_PORT_STAT_RESUME; musb->rh_timer = jiffies - + msecs_to_jiffies(20); + + msecs_to_jiffies(USB_RESUME_TIMEOUT); musb->need_finish_resume = 1; musb->xceiv->otg->state = OTG_STATE_A_HOST; @@ -1596,16 +1597,30 @@ irqreturn_t musb_interrupt(struct musb *musb) is_host_active(musb) ? "host" : "peripheral", musb->int_usb, musb->int_tx, musb->int_rx); - /* the core can interrupt us for multiple reasons; docs have - * a generic interrupt flowchart to follow + /** + * According to Mentor Graphics' documentation, flowchart on page 98, + * IRQ should be handled as follows: + * + * . Resume IRQ + * . Session Request IRQ + * . VBUS Error IRQ + * . Suspend IRQ + * . Connect IRQ + * . Disconnect IRQ + * . Reset/Babble IRQ + * . SOF IRQ (we're not using this one) + * . Endpoint 0 IRQ + * . TX Endpoints + * . RX Endpoints + * + * We will be following that flowchart in order to avoid any problems + * that might arise with internal Finite State Machine. */ + if (musb->int_usb) retval |= musb_stage0_irq(musb, musb->int_usb, devctl); - /* "stage 1" is handling endpoint irqs */ - - /* handle endpoint 0 first */ if (musb->int_tx & 1) { if (is_host_active(musb)) retval |= musb_h_ep0_irq(musb); @@ -1613,37 +1628,31 @@ irqreturn_t musb_interrupt(struct musb *musb) retval |= musb_g_ep0_irq(musb); } - /* RX on endpoints 1-15 */ - reg = musb->int_rx >> 1; + reg = musb->int_tx >> 1; ep_num = 1; while (reg) { if (reg & 1) { - /* musb_ep_select(musb->mregs, ep_num); */ - /* REVISIT just retval = ep->rx_irq(...) */ retval = IRQ_HANDLED; if (is_host_active(musb)) - musb_host_rx(musb, ep_num); + musb_host_tx(musb, ep_num); else - musb_g_rx(musb, ep_num); + musb_g_tx(musb, ep_num); } - reg >>= 1; ep_num++; } - /* TX on endpoints 1-15 */ - reg = musb->int_tx >> 1; + reg = musb->int_rx >> 1; ep_num = 1; while (reg) { if (reg & 1) { - /* musb_ep_select(musb->mregs, ep_num); */ - /* REVISIT just retval |= ep->tx_irq(...) 
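The musb_interrupt() rework above (its RX half continues just below) services endpoints in the order the Mentor flowchart prescribes, TX before RX, using the usual shift-and-test walk over a pending-interrupt bitmask with endpoint 0 handled separately. The loop in isolation, with hypothetical handler names:

#include <stdio.h>

static void handle_ep(const char *dir, unsigned int ep)
{
    printf("%s endpoint %u\n", dir, ep);
}

/* Service endpoints 1..N whose bit is set in 'pending' (bit 0 is EP0). */
static void service(const char *dir, unsigned int pending)
{
    unsigned int reg = pending >> 1;    /* EP0 is handled on its own */
    unsigned int ep_num = 1;

    while (reg) {
        if (reg & 1)
            handle_ep(dir, ep_num);
        reg >>= 1;
        ep_num++;
    }
}

int main(void)
{
    unsigned int int_tx = 0x0a;     /* EP1 and EP3 pending */
    unsigned int int_rx = 0x04;     /* EP2 pending */

    service("tx", int_tx);          /* TX first, as in the reordered handler */
    service("rx", int_rx);
    return 0;
}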
*/ retval = IRQ_HANDLED; if (is_host_active(musb)) - musb_host_tx(musb, ep_num); + musb_host_rx(musb, ep_num); else - musb_g_tx(musb, ep_num); + musb_g_rx(musb, ep_num); } + reg >>= 1; ep_num++; } @@ -2460,7 +2469,7 @@ static int musb_resume(struct device *dev) if (musb->need_finish_resume) { musb->need_finish_resume = 0; schedule_delayed_work(&musb->finish_resume_work, - msecs_to_jiffies(20)); + msecs_to_jiffies(USB_RESUME_TIMEOUT)); } /* diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index b072420e44f5..a9c47315f06b 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c @@ -137,7 +137,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend) /* later, GetPortStatus will stop RESUME signaling */ musb->port1_status |= MUSB_PORT_STAT_RESUME; schedule_delayed_work(&musb->finish_resume_work, - msecs_to_jiffies(20)); + msecs_to_jiffies(USB_RESUME_TIMEOUT)); } } diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c index 2f9735b35338..d1cd6b50f520 100644 --- a/drivers/usb/phy/phy.c +++ b/drivers/usb/phy/phy.c @@ -81,7 +81,9 @@ static void devm_usb_phy_release(struct device *dev, void *res) static int devm_usb_phy_match(struct device *dev, void *res, void *match_data) { - return res == match_data; + struct usb_phy **phy = res; + + return *phy == match_data; } /** diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 995986b8e36b..d925f55e4857 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -862,6 +862,7 @@ static int load_elf_binary(struct linux_binprm *bprm) i < loc->elf_ex.e_phnum; i++, elf_ppnt++) { int elf_prot = 0, elf_flags; unsigned long k, vaddr; + unsigned long total_size = 0; if (elf_ppnt->p_type != PT_LOAD) continue; @@ -924,10 +925,16 @@ static int load_elf_binary(struct linux_binprm *bprm) #else load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); #endif + total_size = total_mapping_size(elf_phdata, + loc->elf_ex.e_phnum); + if (!total_size) { + error = -EINVAL; + goto out_free_dentry; + } } error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, - elf_prot, elf_flags, 0); + elf_prot, elf_flags, total_size); if (BAD_ADDR(error)) { retval = IS_ERR((void *)error) ? 
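The phy.c one-liner above fixes a common devres mistake: the res passed to a match callback points at the slot where the resource pointer was stored, so the callback has to compare *(struct usb_phy **)res with the candidate, not res itself. A user-space model of that double indirection:

#include <stdio.h>

struct phy { int id; };

/* 'res' points at the slot where the pointer was stored. */
static int match_slot(void *res, void *match_data)
{
    struct phy **slot = res;

    return *slot == match_data;
}

int main(void)
{
    struct phy a = { 1 }, b = { 2 };
    struct phy *slots[2] = { &a, &b };      /* what devres keeps per resource */

    /* Wrong: comparing the slot address itself never matches. */
    printf("broken: %d\n", (void *)&slots[1] == (void *)&b);
    /* Right: dereference the slot first. */
    printf("fixed:  %d\n", match_slot(&slots[1], &b));
    return 0;
}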
PTR_ERR((void*)error) : -EINVAL; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a684086c3c81..4623a55d9c5b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6967,12 +6967,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root, return -ENOSPC; } - if (btrfs_test_opt(root, DISCARD)) - ret = btrfs_discard_extent(root, start, len, NULL); - if (pin) pin_down_extent(root, cache, start, len, 1); else { + if (btrfs_test_opt(root, DISCARD)) + ret = btrfs_discard_extent(root, start, len, NULL); btrfs_add_free_space(cache, start, len); btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc); } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d49fe8a0f6b5..09a566acc10f 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2897,6 +2897,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len, if (src == dst) return -EINVAL; + if (len == 0) + return 0; + btrfs_double_lock(src, loff, dst, dst_loff, len); ret = extent_same_check_offsets(src, loff, len); @@ -3626,6 +3629,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, if (off + len == src->i_size) len = ALIGN(src->i_size, bs) - off; + if (len == 0) { + ret = 0; + goto out_unlock; + } + /* verify the end result is block aligned */ if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) || !IS_ALIGNED(destoff, bs)) diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 47b19465f0dc..01bad724b5f7 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -360,22 +360,42 @@ const struct xattr_handler *btrfs_xattr_handlers[] = { /* * Check if the attribute is in a supported namespace. * - * This applied after the check for the synthetic attributes in the system + * This is applied after the check for the synthetic attributes in the system * namespace. 
*/ -static bool btrfs_is_valid_xattr(const char *name) +static int btrfs_is_valid_xattr(const char *name) { - return !strncmp(name, XATTR_SECURITY_PREFIX, - XATTR_SECURITY_PREFIX_LEN) || - !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) || - !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || - !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) || - !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN); + int len = strlen(name); + int prefixlen = 0; + + if (!strncmp(name, XATTR_SECURITY_PREFIX, + XATTR_SECURITY_PREFIX_LEN)) + prefixlen = XATTR_SECURITY_PREFIX_LEN; + else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) + prefixlen = XATTR_SYSTEM_PREFIX_LEN; + else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) + prefixlen = XATTR_TRUSTED_PREFIX_LEN; + else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) + prefixlen = XATTR_USER_PREFIX_LEN; + else if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN)) + prefixlen = XATTR_BTRFS_PREFIX_LEN; + else + return -EOPNOTSUPP; + + /* + * The name cannot consist of just prefix + */ + if (len <= prefixlen) + return -EINVAL; + + return 0; } ssize_t btrfs_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size) { + int ret; + /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler @@ -384,8 +404,9 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name, if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_getxattr(dentry, name, buffer, size); - if (!btrfs_is_valid_xattr(name)) - return -EOPNOTSUPP; + ret = btrfs_is_valid_xattr(name); + if (ret) + return ret; return __btrfs_getxattr(dentry->d_inode, name, buffer, size); } @@ -393,6 +414,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; + int ret; /* * The permission on security.* and system.* is not checked @@ -409,8 +431,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_setxattr(dentry, name, value, size, flags); - if (!btrfs_is_valid_xattr(name)) - return -EOPNOTSUPP; + ret = btrfs_is_valid_xattr(name); + if (ret) + return ret; if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN)) return btrfs_set_prop(dentry->d_inode, name, @@ -426,6 +449,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, int btrfs_removexattr(struct dentry *dentry, const char *name) { struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; + int ret; /* * The permission on security.* and system.* is not checked @@ -442,8 +466,9 @@ int btrfs_removexattr(struct dentry *dentry, const char *name) if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return generic_removexattr(dentry, name); - if (!btrfs_is_valid_xattr(name)) - return -EOPNOTSUPP; + ret = btrfs_is_valid_xattr(name); + if (ret) + return ret; if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN)) return btrfs_set_prop(dentry->d_inode, name, diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 2291923dae4e..5e8ae7811220 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1865,7 +1865,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, struct inode *inode) { struct inode *dir = dentry->d_parent->d_inode; - struct buffer_head *bh; + 
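btrfs_is_valid_xattr() above now distinguishes "unknown namespace" (-EOPNOTSUPP) from "name is nothing but the prefix" (-EINVAL), and the get/set/remove entry points forward whichever errno it picked. The validation boils down to a prefix match plus a length check; a sketch with a trimmed prefix list:

#include <stdio.h>
#include <string.h>
#include <errno.h>

static int is_valid_xattr_name(const char *name)
{
    static const char *prefixes[] = { "user.", "trusted.", "security.", "system." };
    size_t len = strlen(name);
    size_t prefixlen = 0;

    for (size_t i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++) {
        size_t plen = strlen(prefixes[i]);

        if (!strncmp(name, prefixes[i], plen)) {
            prefixlen = plen;
            break;
        }
    }
    if (!prefixlen)
        return -EOPNOTSUPP;     /* unknown namespace */
    if (len <= prefixlen)
        return -EINVAL;         /* name is only the prefix */
    return 0;
}

int main(void)
{
    printf("%d %d %d\n",
           is_valid_xattr_name("user.comment"),    /* 0           */
           is_valid_xattr_name("user."),           /* -EINVAL     */
           is_valid_xattr_name("foo.bar"));        /* -EOPNOTSUPP */
    return 0;
}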
struct buffer_head *bh = NULL; struct ext4_dir_entry_2 *de; struct ext4_dir_entry_tail *t; struct super_block *sb; @@ -1889,14 +1889,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, return retval; if (retval == 1) { retval = 0; - return retval; + goto out; } } if (is_dx(dir)) { retval = ext4_dx_add_entry(handle, dentry, inode); if (!retval || (retval != ERR_BAD_DX_DIR)) - return retval; + goto out; ext4_clear_inode_flag(dir, EXT4_INODE_INDEX); dx_fallback++; ext4_mark_inode_dirty(handle, dir); @@ -1908,14 +1908,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, return PTR_ERR(bh); retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh); - if (retval != -ENOSPC) { - brelse(bh); - return retval; - } + if (retval != -ENOSPC) + goto out; if (blocks == 1 && !dx_fallback && - EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) - return make_indexed_dir(handle, dentry, inode, bh); + EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) { + retval = make_indexed_dir(handle, dentry, inode, bh); + bh = NULL; /* make_indexed_dir releases bh */ + goto out; + } brelse(bh); } bh = ext4_append(handle, dir, &block); @@ -1931,6 +1932,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, } retval = add_dirent_to_buf(handle, dentry, inode, de, bh); +out: brelse(bh); if (retval == 0) ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY); diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index d12ff4e2dbe7..251a1f04be2f 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -31,7 +31,7 @@ static struct hlist_head nlm_files[FILE_NRHASH]; static DEFINE_MUTEX(nlm_file_mutex); -#ifdef NFSD_DEBUG +#ifdef CONFIG_SUNRPC_DEBUG static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f) { u32 *fhp = (u32*)f->data; diff --git a/fs/namei.c b/fs/namei.c index bc35b02883bb..3abfbda53574 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1585,7 +1585,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path, if (should_follow_link(path->dentry, follow)) { if (nd->flags & LOOKUP_RCU) { - if (unlikely(unlazy_walk(nd, path->dentry))) { + if (unlikely(nd->path.mnt != path->mnt || + unlazy_walk(nd, path->dentry))) { err = -ECHILD; goto out_err; } @@ -3028,7 +3029,8 @@ finish_lookup: if (should_follow_link(path->dentry, !symlink_ok)) { if (nd->flags & LOOKUP_RCU) { - if (unlikely(unlazy_walk(nd, path->dentry))) { + if (unlikely(nd->path.mnt != path->mnt || + unlazy_walk(nd, path->dentry))) { error = -ECHILD; goto out; } diff --git a/fs/namespace.c b/fs/namespace.c index cd1e9681a0cf..07d0562290a5 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1323,14 +1323,15 @@ static inline void namespace_lock(void) down_write(&namespace_sem); } +enum umount_tree_flags { + UMOUNT_SYNC = 1, + UMOUNT_PROPAGATE = 2, +}; /* * mount_lock must be held * namespace_sem must be held for write - * how = 0 => just this tree, don't propagate - * how = 1 => propagate; we know that nobody else has reference to any victims - * how = 2 => lazy umount */ -void umount_tree(struct mount *mnt, int how) +static void umount_tree(struct mount *mnt, enum umount_tree_flags how) { HLIST_HEAD(tmp_list); struct mount *p; @@ -1344,7 +1345,7 @@ void umount_tree(struct mount *mnt, int how) hlist_for_each_entry(p, &tmp_list, mnt_hash) list_del_init(&p->mnt_child); - if (how) + if (how & UMOUNT_PROPAGATE) propagate_umount(&tmp_list); hlist_for_each_entry(p, &tmp_list, mnt_hash) { @@ -1352,7 +1353,7 @@ void umount_tree(struct mount *mnt, int how) 
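The namespace.c rework above (its call-site updates follow below) replaces umount_tree()'s numeric how argument with or-able enum flags, so callers state UMOUNT_PROPAGATE or UMOUNT_SYNC explicitly instead of passing 0, 1 or 2 whose meaning lived only in a comment. A minimal sketch of the flag-argument style:

#include <stdio.h>

enum umount_flags {
    UMOUNT_SYNC      = 1,   /* non-lazy: victims get MNT_SYNC_UMOUNT */
    UMOUNT_PROPAGATE = 2,   /* also unmount propagated copies */
};

static void umount_tree_demo(const char *what, enum umount_flags how)
{
    if (how & UMOUNT_PROPAGATE)
        printf("%s: propagating\n", what);
    if (how & UMOUNT_SYNC)
        printf("%s: synchronous unmount\n", what);
    if (!how)
        printf("%s: plain detach\n", what);
}

int main(void)
{
    umount_tree_demo("MNT_DETACH", UMOUNT_PROPAGATE);
    umount_tree_demo("umount",     UMOUNT_PROPAGATE | UMOUNT_SYNC);
    umount_tree_demo("detach_mounts", 0);
    return 0;
}

Testing individual bits with & also lets combinations such as "propagate without sync", used by the lazy-umount path, be expressed without inventing another magic number.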
list_del_init(&p->mnt_list); __touch_mnt_namespace(p->mnt_ns); p->mnt_ns = NULL; - if (how < 2) + if (how & UMOUNT_SYNC) p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; if (mnt_has_parent(p)) { hlist_del_init(&p->mnt_mp_list); @@ -1457,14 +1458,14 @@ static int do_umount(struct mount *mnt, int flags) if (flags & MNT_DETACH) { if (!list_empty(&mnt->mnt_list)) - umount_tree(mnt, 2); + umount_tree(mnt, UMOUNT_PROPAGATE); retval = 0; } else { shrink_submounts(mnt); retval = -EBUSY; if (!propagate_mount_busy(mnt, 2)) { if (!list_empty(&mnt->mnt_list)) - umount_tree(mnt, 1); + umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); retval = 0; } } @@ -1496,7 +1497,7 @@ void __detach_mounts(struct dentry *dentry) lock_mount_hash(); while (!hlist_empty(&mp->m_list)) { mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); - umount_tree(mnt, 2); + umount_tree(mnt, 0); } unlock_mount_hash(); put_mountpoint(mp); @@ -1658,7 +1659,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, out: if (res) { lock_mount_hash(); - umount_tree(res, 0); + umount_tree(res, UMOUNT_SYNC); unlock_mount_hash(); } return q; @@ -1682,7 +1683,7 @@ void drop_collected_mounts(struct vfsmount *mnt) { namespace_lock(); lock_mount_hash(); - umount_tree(real_mount(mnt), 0); + umount_tree(real_mount(mnt), UMOUNT_SYNC); unlock_mount_hash(); namespace_unlock(); } @@ -1865,7 +1866,7 @@ static int attach_recursive_mnt(struct mount *source_mnt, out_cleanup_ids: while (!hlist_empty(&tree_list)) { child = hlist_entry(tree_list.first, struct mount, mnt_hash); - umount_tree(child, 0); + umount_tree(child, UMOUNT_SYNC); } unlock_mount_hash(); cleanup_group_ids(source_mnt, NULL); @@ -2045,7 +2046,7 @@ static int do_loopback(struct path *path, const char *old_name, err = graft_tree(mnt, parent, mp); if (err) { lock_mount_hash(); - umount_tree(mnt, 0); + umount_tree(mnt, UMOUNT_SYNC); unlock_mount_hash(); } out2: @@ -2416,7 +2417,7 @@ void mark_mounts_for_expiry(struct list_head *mounts) while (!list_empty(&graveyard)) { mnt = list_first_entry(&graveyard, struct mount, mnt_expire); touch_mnt_namespace(mnt->mnt_ns); - umount_tree(mnt, 1); + umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); } unlock_mount_hash(); namespace_unlock(); @@ -2487,7 +2488,7 @@ static void shrink_submounts(struct mount *mnt) m = list_first_entry(&graveyard, struct mount, mnt_expire); touch_mnt_namespace(m->mnt_ns); - umount_tree(m, 1); + umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC); } } } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index cb4376b78ed9..5303646f1e7a 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -7338,6 +7338,11 @@ nfs4_stat_to_errno(int stat) .p_name = #proc, \ } +#define STUB(proc) \ +[NFSPROC4_CLNT_##proc] = { \ + .p_name = #proc, \ +} + struct rpc_procinfo nfs4_procedures[] = { PROC(READ, enc_read, dec_read), PROC(WRITE, enc_write, dec_write), @@ -7390,6 +7395,7 @@ struct rpc_procinfo nfs4_procedures[] = { PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name), PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid), PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid), + STUB(GETDEVICELIST), PROC(BIND_CONN_TO_SESSION, enc_bind_conn_to_session, dec_bind_conn_to_session), PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid), diff --git a/fs/nfs/read.c b/fs/nfs/read.c index c91a4799c562..beff2769c5c5 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -269,7 +269,7 @@ int nfs_readpage(struct file *file, struct page *page) dprintk("NFS: nfs_readpage (%p %ld@%lu)\n", page, PAGE_CACHE_SIZE, page_file_index(page)); 
nfs_inc_stats(inode, NFSIOS_VFSREADPAGE); - nfs_inc_stats(inode, NFSIOS_READPAGES); + nfs_add_stats(inode, NFSIOS_READPAGES, 1); /* * Try to flush any pending writes to the file.. diff --git a/fs/nfs/write.c b/fs/nfs/write.c index af3af685a9e3..d489ff3f438f 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -575,7 +575,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st int ret; nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); - nfs_inc_stats(inode, NFSIOS_WRITEPAGES); + nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); nfs_pageio_cond_complete(pgio, page_file_index(page)); ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index ac71d13c69ef..2cd2d64ff1e8 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1028,6 +1028,8 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n"); return status; } + if (!file) + return nfserr_bad_stateid; status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file, fallocate->falloc_offset, @@ -1067,6 +1069,8 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n"); return status; } + if (!file) + return nfserr_bad_stateid; switch (seek->seek_whence) { case NFS4_CONTENT_DATA: diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 7cfb905a1e90..ddacc436a77a 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1162,7 +1162,7 @@ hash_sessionid(struct nfs4_sessionid *sessionid) return sid->sequence % SESSION_HASH_SIZE; } -#ifdef NFSD_DEBUG +#ifdef CONFIG_SUNRPC_DEBUG static inline void dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) { diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 15f7b73e0c0f..281d12640c0b 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3246,6 +3246,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, unsigned long maxcount; struct xdr_stream *xdr = &resp->xdr; struct file *file = read->rd_filp; + struct svc_fh *fhp = read->rd_fhp; int starting_len = xdr->buf->len; struct raparms *ra; __be32 *p; @@ -3269,12 +3270,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, maxcount = min_t(unsigned long, maxcount, (xdr->buf->buflen - xdr->buf->len)); maxcount = min_t(unsigned long, maxcount, read->rd_length); - if (!read->rd_filp) { + if (read->rd_filp) + err = nfsd_permission(resp->rqstp, fhp->fh_export, + fhp->fh_dentry, + NFSD_MAY_READ|NFSD_MAY_OWNER_OVERRIDE); + else err = nfsd_get_tmp_read_open(resp->rqstp, read->rd_fhp, &file, &ra); - if (err) - goto err_truncate; - } + if (err) + goto err_truncate; if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)) err = nfsd4_encode_splice_read(resp, read, file, maxcount); diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 33a46a8dfaf7..403d8e14fcaf 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -24,7 +24,7 @@ #include "export.h" #undef ifdebug -#ifdef NFSD_DEBUG +#ifdef CONFIG_SUNRPC_DEBUG # define ifdebug(flag) if (nfsd_debug & NFSDDBG_##flag) #else # define ifdebug(flag) if (0) diff --git a/fs/open.c b/fs/open.c index 813be037b412..d45a7b919504 100644 --- a/fs/open.c +++ b/fs/open.c @@ -570,6 +570,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group) uid = make_kuid(current_user_ns(), user); gid = make_kgid(current_user_ns(), group); +retry_deleg: newattrs.ia_valid = ATTR_CTIME; if (user != (uid_t) -1) { if 
(!uid_valid(uid)) @@ -586,7 +587,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group) if (!S_ISDIR(inode->i_mode)) newattrs.ia_valid |= ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV; -retry_deleg: mutex_lock(&inode->i_mutex); error = security_path_chown(path, uid, gid); if (!error) diff --git a/fs/pnode.h b/fs/pnode.h index 4a246358b031..16afc3d6d2f2 100644 --- a/fs/pnode.h +++ b/fs/pnode.h @@ -47,7 +47,6 @@ int get_dominating_id(struct mount *mnt, const struct path *root); unsigned int mnt_get_count(struct mount *mnt); void mnt_set_mountpoint(struct mount *, struct mountpoint *, struct mount *); -void umount_tree(struct mount *, int); struct mount *copy_tree(struct mount *, struct dentry *, int); bool is_path_reachable(struct mount *, struct dentry *, const struct path *root); diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index bbef17368e49..9f2847cdf3ce 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -199,9 +199,29 @@ typedef int s32; typedef s32 acpi_native_int; typedef u32 acpi_size; + +#ifdef ACPI_32BIT_PHYSICAL_ADDRESS + +/* + * OSPMs can define this to shrink the size of the structures for 32-bit + * none PAE environment. ASL compiler may always define this to generate + * 32-bit OSPM compliant tables. + */ typedef u32 acpi_io_address; typedef u32 acpi_physical_address; +#else /* ACPI_32BIT_PHYSICAL_ADDRESS */ + +/* + * It is reported that, after some calculations, the physical addresses can + * wrap over the 32-bit boundary on 32-bit PAE environment. + * https://bugzilla.kernel.org/show_bug.cgi?id=87971 + */ +typedef u64 acpi_io_address; +typedef u64 acpi_physical_address; + +#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */ + #define ACPI_MAX_PTR ACPI_UINT32_MAX #define ACPI_SIZE_MAX ACPI_UINT32_MAX diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h index 5f8cc1fa3278..9e1ed2e380b6 100644 --- a/include/acpi/platform/acenv.h +++ b/include/acpi/platform/acenv.h @@ -76,6 +76,7 @@ #define ACPI_LARGE_NAMESPACE_NODE #define ACPI_DATA_TABLE_DISASSEMBLY #define ACPI_SINGLE_THREADED +#define ACPI_32BIT_PHYSICAL_ADDRESS #endif /* acpi_exec configuration. 
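The ACPICA hunk above makes acpi_physical_address and acpi_io_address 64-bit by default, keeping the 32-bit layout behind ACPI_32BIT_PHYSICAL_ADDRESS for OSPMs that want smaller structures, because 32-bit PAE systems can see physical addresses beyond 4 GiB. The mechanism is just a build-time typedef switch; a trivial stand-alone version (the macro and type names below are placeholders):

#include <stdio.h>
#include <stdint.h>

/* Build with -DPHYS_ADDR_32BIT to get the narrow type. */
#ifdef PHYS_ADDR_32BIT
typedef uint32_t phys_addr_demo_t;
#else
typedef uint64_t phys_addr_demo_t;
#endif

int main(void)
{
    /* On 32-bit PAE systems physical addresses can exceed 4 GiB,
     * which is why the default moved to 64 bits. */
    phys_addr_demo_t addr = (phys_addr_demo_t)0x100000000ULL;

    printf("sizeof=%zu addr=0x%llx\n",
           sizeof(addr), (unsigned long long)addr);
    return 0;
}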
Multithreaded with full AML debugger */ diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index ac4888dc86bc..36e0f219e507 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -113,6 +113,7 @@ struct vgic_ops { void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr); u64 (*get_elrsr)(const struct kvm_vcpu *vcpu); u64 (*get_eisr)(const struct kvm_vcpu *vcpu); + void (*clear_eisr)(struct kvm_vcpu *vcpu); u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu); void (*enable_underflow)(struct kvm_vcpu *vcpu); void (*disable_underflow)(struct kvm_vcpu *vcpu); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index bbfceb756452..33b52fb0e20f 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -48,7 +48,7 @@ struct bpf_map *bpf_map_get(struct fd f); /* function argument constraints */ enum bpf_arg_type { - ARG_ANYTHING = 0, /* any argument is ok */ + ARG_DONTCARE = 0, /* unused argument in helper function */ /* the following constraints used to prototype * bpf_map_lookup/update/delete_elem() functions @@ -62,6 +62,8 @@ enum bpf_arg_type { */ ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ + + ARG_ANYTHING, /* any (initialized) argument is ok */ }; /* type of values returned from helper functions */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 85ab7d72b54c..5fcc6064247b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -762,6 +762,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, int node); +struct sk_buff *__build_skb(void *data, unsigned int frag_size); struct sk_buff *build_skb(void *data, unsigned int frag_size); static inline struct sk_buff *alloc_skb(unsigned int size, gfp_t priority) @@ -2998,6 +2999,18 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, */ #define CHECKSUM_BREAK 76 +/* Unset checksum-complete + * + * Unset checksum complete can be done when packet is being modified + * (uncompressed for instance) and checksum-complete value is + * invalidated. + */ +static inline void skb_checksum_complete_unset(struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; +} + /* Validate (init) checksum based on checksum complete. * * Return values: diff --git a/include/linux/usb.h b/include/linux/usb.h index 058a7698d7e3..335b413e5980 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -205,6 +205,32 @@ void usb_put_intf(struct usb_interface *intf); #define USB_MAXINTERFACES 32 #define USB_MAXIADS (USB_MAXINTERFACES/2) +/* + * USB Resume Timer: Every Host controller driver should drive the resume + * signalling on the bus for the amount of time defined by this macro. + * + * That way we will have a 'stable' behavior among all HCDs supported by Linux. + * + * Note that the USB Specification states we should drive resume for *at least* + * 20 ms, but it doesn't give an upper bound. This creates two possible + * situations which we want to avoid: + * + * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes + * us to fail USB Electrical Tests, thus failing Certification + * + * (b) Some (many) devices actually need more than 20 ms of resume signalling, + * and while we can argue that's against the USB Specification, we don't have + * control over which devices a certification laboratory will be using for + * certification. 
If CertLab uses a device which was tested against Windows and + * that happens to have relaxed resume signalling rules, we might fall into + * situations where we fail interoperability and electrical tests. + * + * In order to avoid both conditions, we're using a 40 ms resume timeout, which + * should cope with both LPJ calibration errors and devices not following every + * detail of the USB Specification. + */ +#define USB_RESUME_TIMEOUT 40 /* ms */ + /** * struct usb_interface_cache - long-term representation of a device interface * @num_altsetting: number of altsettings defined. diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 672150b6aaf5..985ca4c907fe 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -524,7 +524,7 @@ struct se_cmd { sense_reason_t (*execute_cmd)(struct se_cmd *); sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *, u32, enum dma_data_direction); - sense_reason_t (*transport_complete_callback)(struct se_cmd *); + sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); unsigned char *t_task_cdb; unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h index 1fdc95bb2375..c021bbaf21b7 100644 --- a/include/uapi/linux/nfsd/debug.h +++ b/include/uapi/linux/nfsd/debug.h @@ -12,14 +12,6 @@ #include /* - * Enable debugging for nfsd. - * Requires RPC_DEBUG. - */ -#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) -# define NFSD_DEBUG 1 -#endif - -/* * knfsd debug flags */ #define NFSDDBG_SOCK 0x0001 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 36508e69e92a..5d8ea3d8a897 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -755,7 +755,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno, enum bpf_reg_type expected_type; int err = 0; - if (arg_type == ARG_ANYTHING) + if (arg_type == ARG_DONTCARE) return 0; if (reg->type == NOT_INIT) { @@ -763,6 +763,9 @@ static int check_func_arg(struct verifier_env *env, u32 regno, return -EACCES; } + if (arg_type == ARG_ANYTHING) + return 0; + if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE) { expected_type = PTR_TO_STACK; diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 1eb9d90c3af9..5009263eba97 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -697,6 +697,8 @@ static int ptrace_peek_siginfo(struct task_struct *child, static int ptrace_resume(struct task_struct *child, long request, unsigned long data) { + bool need_siglock; + if (!valid_signal(data)) return -EIO; @@ -724,8 +726,26 @@ static int ptrace_resume(struct task_struct *child, long request, user_disable_single_step(child); } + /* + * Change ->exit_code and ->state under siglock to avoid the race + * with wait_task_stopped() in between; a non-zero ->exit_code will + * wrongly look like another report from tracee. + * + * Note that we need siglock even if ->exit_code == data and/or this + * status was not reported yet, the new status must not be cleared by + * wait_task_stopped() after resume. + * + * If data == 0 we do not care if wait_task_stopped() reports the old + * status and clears the code too; this can't race with the tracee, it + * takes siglock after resume. 
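The verifier hunk above adds ARG_DONTCARE for genuinely unused helper arguments and moves the ARG_ANYTHING test below the NOT_INIT check, so "any value" still means "any initialized value". The ordering is the whole point; a compact model with simplified register and argument types (not the real bpf enums):

#include <stdio.h>

enum arg_type { ARG_DONTCARE = 0, ARG_PTR, ARG_ANYTHING };
enum reg_type { NOT_INIT = 0, SCALAR, PTR_TO_STACK };

static int check_arg(enum reg_type reg, enum arg_type arg)
{
    if (arg == ARG_DONTCARE)    /* unused argument: nothing to check */
        return 0;

    if (reg == NOT_INIT)        /* every used argument must be initialized */
        return -1;

    if (arg == ARG_ANYTHING)    /* any initialized value is fine */
        return 0;

    return reg == PTR_TO_STACK ? 0 : -1;    /* stricter pointer arguments */
}

int main(void)
{
    printf("%d ",  check_arg(NOT_INIT, ARG_DONTCARE));  /* 0  */
    printf("%d ",  check_arg(NOT_INIT, ARG_ANYTHING));  /* -1 */
    printf("%d\n", check_arg(SCALAR,   ARG_ANYTHING));  /* 0  */
    return 0;
}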
+ */ + need_siglock = data && !thread_group_empty(current); + if (need_siglock) + spin_lock_irq(&child->sighand->siglock); child->exit_code = data; wake_up_state(child, __TASK_TRACED); + if (need_siglock) + spin_unlock_irq(&child->sighand->siglock); return 0; } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index d2e151c83bd5..e896a58e18d8 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2681,7 +2681,7 @@ static DEFINE_PER_CPU(unsigned int, current_context); static __always_inline int trace_recursive_lock(void) { - unsigned int val = this_cpu_read(current_context); + unsigned int val = __this_cpu_read(current_context); int bit; if (in_interrupt()) { @@ -2698,18 +2698,17 @@ static __always_inline int trace_recursive_lock(void) return 1; val |= (1 << bit); - this_cpu_write(current_context, val); + __this_cpu_write(current_context, val); return 0; } static __always_inline void trace_recursive_unlock(void) { - unsigned int val = this_cpu_read(current_context); + unsigned int val = __this_cpu_read(current_context); - val--; - val &= this_cpu_read(current_context); - this_cpu_write(current_context, val); + val &= val & (val - 1); + __this_cpu_write(current_context, val); } #else diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index b03a0ea77b99..6f7e40d6fdec 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -565,6 +565,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) { char *event = NULL, *sub = NULL, *match; + int ret; /* * The buf format can be : @@ -590,7 +591,13 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) event = NULL; } - return __ftrace_set_clr_event(tr, match, sub, event, set); + ret = __ftrace_set_clr_event(tr, match, sub, event, set); + + /* Put back the colon to allow this to be called again */ + if (buf) + *(buf - 1) = ':'; + + return ret; } /** diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index ba476009e5de..224af0a51803 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -1309,15 +1309,19 @@ void graph_trace_open(struct trace_iterator *iter) { /* pid and depth on the last trace processed */ struct fgraph_data *data; + gfp_t gfpflags; int cpu; iter->private = NULL; - data = kzalloc(sizeof(*data), GFP_KERNEL); + /* We can be called in atomic context via ftrace_dump() */ + gfpflags = (in_atomic() || irqs_disabled()) ? 
GFP_ATOMIC : GFP_KERNEL; + + data = kzalloc(sizeof(*data), gfpflags); if (!data) goto out_err; - data->cpu_data = alloc_percpu(struct fgraph_cpu_data); + data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags); if (!data->cpu_data) goto out_err_free; diff --git a/lib/string.c b/lib/string.c index 10063300b830..643b0a90802c 100644 --- a/lib/string.c +++ b/lib/string.c @@ -610,7 +610,7 @@ EXPORT_SYMBOL(memset); void memzero_explicit(void *s, size_t count) { memset(s, 0, count); - OPTIMIZER_HIDE_VAR(s); + barrier(); } EXPORT_SYMBOL(memzero_explicit); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index c190d22b6b3d..ba2bd0a9a285 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -651,6 +651,13 @@ static int br_nf_forward_finish(struct sk_buff *skb) struct net_device *in; if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) { + int frag_max_size; + + if (skb->protocol == htons(ETH_P_IP)) { + frag_max_size = IPCB(skb)->frag_max_size; + BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size; + } + in = nf_bridge->physindev; if (nf_bridge->mask & BRNF_PKT_TYPE) { skb->pkt_type = PACKET_OTHERHOST; @@ -710,8 +717,14 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops, nf_bridge->mask |= BRNF_PKT_TYPE; } - if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb)) - return NF_DROP; + if (pf == NFPROTO_IPV4) { + int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size; + + if (br_parse_ip_options(skb)) + return NF_DROP; + + IPCB(skb)->frag_max_size = frag_max; + } /* The physdev module checks on this */ nf_bridge->mask |= BRNF_BRIDGED; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 39c444c1206d..3b0a8b0442b3 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -278,13 +278,14 @@ nodata: EXPORT_SYMBOL(__alloc_skb); /** - * build_skb - build a network buffer + * __build_skb - build a network buffer * @data: data buffer provided by caller - * @frag_size: size of fragment, or 0 if head was kmalloced + * @frag_size: size of data, or 0 if head was kmalloced * * Allocate a new &sk_buff. Caller provides space holding head and * skb_shared_info. @data must have been allocated by kmalloc() only if - * @frag_size is 0, otherwise data should come from the page allocator. + * @frag_size is 0, otherwise data should come from the page allocator + * or vmalloc() * The return is the new skb buffer. * On a failure the return is %NULL, and @data is not freed. * Notes : @@ -295,7 +296,7 @@ EXPORT_SYMBOL(__alloc_skb); * before giving packet to stack. * RX rings only contains data buffers, not full skbs. 
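The graph_trace_open() hunk above switches to a run-time gfp choice because the tracer can be entered from atomic context via ftrace_dump(). A small sketch of the same pattern, assuming a hypothetical demo_alloc_state() helper rather than any existing kernel function:

#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/slab.h>

static void *demo_alloc_state(size_t size)
{
	/* Sleeping allocations are only legal when the caller can sleep;
	 * ftrace_dump()-style callers cannot, so fall back to GFP_ATOMIC.
	 */
	gfp_t gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	return kzalloc(size, gfpflags);
}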
*/ -struct sk_buff *build_skb(void *data, unsigned int frag_size) +struct sk_buff *__build_skb(void *data, unsigned int frag_size) { struct skb_shared_info *shinfo; struct sk_buff *skb; @@ -309,7 +310,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size) memset(skb, 0, offsetof(struct sk_buff, tail)); skb->truesize = SKB_TRUESIZE(size); - skb->head_frag = frag_size != 0; atomic_set(&skb->users, 1); skb->head = data; skb->data = data; @@ -326,6 +326,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size) return skb; } + +/* build_skb() is wrapper over __build_skb(), that specifically + * takes care of skb->head and skb->pfmemalloc + * This means that if @frag_size is not zero, then @data must be backed + * by a page fragment, not kmalloc() or vmalloc() + */ +struct sk_buff *build_skb(void *data, unsigned int frag_size) +{ + struct sk_buff *skb = __build_skb(data, frag_size); + + if (skb && frag_size) { + skb->head_frag = 1; + if (virt_to_head_page(data)->pfmemalloc) + skb->pfmemalloc = 1; + } + return skb; +} EXPORT_SYMBOL(build_skb); struct netdev_alloc_cache { @@ -346,7 +363,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc, gfp_t gfp = gfp_mask; if (order) { - gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY; + gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | + __GFP_NOMEMALLOC; page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); nc->frag.size = PAGE_SIZE << (page ? order : 0); } diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 787b3c294ce6..d5410b57da19 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -81,6 +81,9 @@ int ip_forward(struct sk_buff *skb) if (skb->pkt_type != PACKET_HOST) goto drop; + if (unlikely(skb->sk)) + goto drop; + if (skb_warn_if_lro(skb)) goto drop; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 9f29453049dc..e625be562d3c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2753,39 +2753,65 @@ begin_fwd: } } -/* Send a fin. The caller locks the socket for us. This cannot be - * allowed to fail queueing a FIN frame under any circumstances. +/* We allow to exceed memory limits for FIN packets to expedite + * connection tear down and (memory) recovery. + * Otherwise tcp_send_fin() could be tempted to either delay FIN + * or even be forced to close flow without any FIN. + */ +static void sk_forced_wmem_schedule(struct sock *sk, int size) +{ + int amt, status; + + if (size <= sk->sk_forward_alloc) + return; + amt = sk_mem_pages(size); + sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; + sk_memory_allocated_add(sk, amt, &status); +} + +/* Send a FIN. The caller locks the socket for us. + * We should try to send a FIN packet really hard, but eventually give up. */ void tcp_send_fin(struct sock *sk) { + struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk); struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb = tcp_write_queue_tail(sk); - int mss_now; - /* Optimization, tack on the FIN if we have a queue of - * unsent frames. But be careful about outgoing SACKS - * and IP options. + /* Optimization, tack on the FIN if we have one skb in write queue and + * this skb was not yet sent, or we are under memory pressure. + * Note: in the latter case, FIN packet will be sent after a timeout, + * as TCP stack thinks it has already been transmitted. 
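The skbuff.c hunk above splits build_skb() into __build_skb() plus a thin wrapper that sets head_frag and propagates pfmemalloc, so only page-fragment backed data should go through the wrapper. A hedged sketch of a caller with vmalloc()-backed data, in the spirit of the netlink change later in this patch; demo_wrap_vmalloc_buf() is a hypothetical caller, not kernel API.

#include <linux/skbuff.h>
#include <linux/vmalloc.h>

static struct sk_buff *demo_wrap_vmalloc_buf(void *data, unsigned int size)
{
	/* __build_skb() does not assume page-fragment backing, so it is the
	 * right call for kmalloc()/vmalloc() buffers.
	 */
	struct sk_buff *skb = __build_skb(data, size);

	if (!skb)
		vfree(data);	/* on failure @data is not freed by __build_skb() */

	return skb;
}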
*/ - mss_now = tcp_current_mss(sk); - - if (tcp_send_head(sk) != NULL) { - TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; - TCP_SKB_CB(skb)->end_seq++; + if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) { +coalesce: + TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; + TCP_SKB_CB(tskb)->end_seq++; tp->write_seq++; + if (!tcp_send_head(sk)) { + /* This means tskb was already sent. + * Pretend we included the FIN on previous transmit. + * We need to set tp->snd_nxt to the value it would have + * if FIN had been sent. This is because retransmit path + * does not change tp->snd_nxt. + */ + tp->snd_nxt++; + return; + } } else { - /* Socket is locked, keep trying until memory is available. */ - for (;;) { - skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); - if (skb) - break; - yield(); + skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); + if (unlikely(!skb)) { + if (tskb) + goto coalesce; + return; } + skb_reserve(skb, MAX_TCP_HEADER); + sk_forced_wmem_schedule(sk, skb->truesize); /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ tcp_init_nondata_skb(skb, tp->write_seq, TCPHDR_ACK | TCPHDR_FIN); tcp_queue_skb(sk, skb); } - __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); + __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); } /* We get here when a process closes a file descriptor (either due to diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index bf4b84ff1c70..6e565ec5cfa4 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2231,7 +2231,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) else ssid_len = ssid[1]; - ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL, + ieee80211_send_probe_req(sdata, sdata->vif.addr, dst, ssid + 2, ssid_len, NULL, 0, (u32) -1, true, 0, ifmgd->associated->channel, false); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 75532efa51cd..4b4a2a4418e4 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1616,13 +1616,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size, if (data == NULL) return NULL; - skb = build_skb(data, size); + skb = __build_skb(data, size); if (skb == NULL) vfree(data); - else { - skb->head_frag = 0; + else skb->destructor = netlink_skb_destructor; - } return skb; } diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c index 2ca9f2e93139..53745f4c2bf5 100644 --- a/sound/pci/emu10k1/emuproc.c +++ b/sound/pci/emu10k1/emuproc.c @@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry, struct snd_emu10k1 *emu = entry->private_data; u32 value; u32 value2; - unsigned long flags; u32 rate; if (emu->card_capabilities->emu_model) { - spin_lock_irqsave(&emu->emu_lock, flags); snd_emu1010_fpga_read(emu, 0x38, &value); - spin_unlock_irqrestore(&emu->emu_lock, flags); if ((value & 0x1) == 0) { - spin_lock_irqsave(&emu->emu_lock, flags); snd_emu1010_fpga_read(emu, 0x2a, &value); snd_emu1010_fpga_read(emu, 0x2b, &value2); - spin_unlock_irqrestore(&emu->emu_lock, flags); rate = 0x1770000 / (((value << 5) | value2)+1); snd_iprintf(buffer, "ADAT Locked : %u\n", rate); } else { snd_iprintf(buffer, "ADAT Unlocked\n"); } - spin_lock_irqsave(&emu->emu_lock, flags); snd_emu1010_fpga_read(emu, 0x20, &value); - spin_unlock_irqrestore(&emu->emu_lock, flags); if ((value & 0x4) == 0) { - spin_lock_irqsave(&emu->emu_lock, flags); snd_emu1010_fpga_read(emu, 0x28, &value); snd_emu1010_fpga_read(emu, 0x29, &value2); - spin_unlock_irqrestore(&emu->emu_lock, 
flags); rate = 0x1770000 / (((value << 5) | value2)+1); snd_iprintf(buffer, "SPDIF Locked : %d\n", rate); } else { @@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry, { struct snd_emu10k1 *emu = entry->private_data; u32 value; - unsigned long flags; int i; snd_iprintf(buffer, "EMU1010 Registers:\n\n"); for(i = 0; i < 0x40; i+=1) { - spin_lock_irqsave(&emu->emu_lock, flags); snd_emu1010_fpga_read(emu, i, &value); - spin_unlock_irqrestore(&emu->emu_lock, flags); snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f); } } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 172c89996d74..39e3640e30eb 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4946,12 +4946,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), @@ -5452,6 +5454,8 @@ static int patch_alc269(struct hda_codec *codec) break; case 0x10ec0256: spec->codec_variant = ALC269_TYPE_ALC256; + spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */ + alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/ break; } @@ -5465,8 +5469,8 @@ static int patch_alc269(struct hda_codec *codec) if (err < 0) goto error; - if (!spec->gen.no_analog && spec->gen.beep_nid) - set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT); + if (!spec->gen.no_analog && spec->gen.beep_nid && spec->gen.mixer_nid) + set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT); codec->patch_ops = alc_patch_ops; #ifdef CONFIG_PM diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c index 7d3a6accaf9a..e770ee6f36da 100644 --- a/sound/soc/codecs/cs4271.c +++ b/sound/soc/codecs/cs4271.c @@ -561,10 +561,10 @@ static int cs4271_codec_probe(struct snd_soc_codec *codec) if (gpio_is_valid(cs4271->gpio_nreset)) { /* Reset codec */ gpio_direction_output(cs4271->gpio_nreset, 0); - udelay(1); + mdelay(1); gpio_set_value(cs4271->gpio_nreset, 1); /* Give the codec time to wake up */ - udelay(1); + mdelay(1); } ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2, diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c index 30c673cdc12e..d043797b7979 100644 --- a/sound/soc/codecs/pcm512x.c +++ b/sound/soc/codecs/pcm512x.c @@ -261,9 +261,9 @@ static const struct soc_enum pcm512x_veds = static const struct snd_kcontrol_new pcm512x_controls[] = { SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2, PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv), 
-SOC_DOUBLE_TLV("Playback Volume", PCM512x_ANALOG_GAIN_CTRL, +SOC_DOUBLE_TLV("Analogue Playback Volume", PCM512x_ANALOG_GAIN_CTRL, PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv), -SOC_DOUBLE_TLV("Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST, +SOC_DOUBLE_TLV("Analogue Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST, PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv), SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT, PCM512x_RQMR_SHIFT, 1, 1), diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c index 31bb4801a005..9e71c768966f 100644 --- a/sound/soc/codecs/wm8741.c +++ b/sound/soc/codecs/wm8741.c @@ -123,7 +123,7 @@ static struct { }; static const unsigned int rates_11289[] = { - 44100, 88235, + 44100, 88200, }; static const struct snd_pcm_hw_constraint_list constraints_11289 = { @@ -150,7 +150,7 @@ static const struct snd_pcm_hw_constraint_list constraints_16384 = { }; static const unsigned int rates_16934[] = { - 44100, 88235, + 44100, 88200, }; static const struct snd_pcm_hw_constraint_list constraints_16934 = { @@ -168,7 +168,7 @@ static const struct snd_pcm_hw_constraint_list constraints_18432 = { }; static const unsigned int rates_22579[] = { - 44100, 88235, 1764000 + 44100, 88200, 176400 }; static const struct snd_pcm_hw_constraint_list constraints_22579 = { @@ -186,7 +186,7 @@ static const struct snd_pcm_hw_constraint_list constraints_24576 = { }; static const unsigned int rates_36864[] = { - 48000, 96000, 19200 + 48000, 96000, 192000 }; static const struct snd_pcm_hw_constraint_list constraints_36864 = { diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c index 158cb3d1db70..f33143f917a3 100644 --- a/sound/soc/davinci/davinci-evm.c +++ b/sound/soc/davinci/davinci-evm.c @@ -431,18 +431,8 @@ static int davinci_evm_probe(struct platform_device *pdev) return ret; } -static int davinci_evm_remove(struct platform_device *pdev) -{ - struct snd_soc_card *card = platform_get_drvdata(pdev); - - snd_soc_unregister_card(card); - - return 0; -} - static struct platform_driver davinci_evm_driver = { .probe = davinci_evm_probe, - .remove = davinci_evm_remove, .driver = { .name = "davinci_evm", .pm = &snd_soc_pm_ops, diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c index dcc665228c71..deb3569ab004 100644 --- a/tools/lib/traceevent/kbuffer-parse.c +++ b/tools/lib/traceevent/kbuffer-parse.c @@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr, switch (type_len) { case KBUFFER_TYPE_PADDING: *length = read_4(kbuf, data); - data += *length; break; case KBUFFER_TYPE_TIME_EXTEND: diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile index 648e31ff4021..f97de8ee9a09 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile @@ -645,7 +645,7 @@ ifeq (${IS_64_BIT}, 1) NO_PERF_READ_VDSO32 := 1 endif endif - ifneq (${IS_X86_64}, 1) + ifneq ($(ARCH), x86) NO_PERF_READ_VDSOX32 := 1 endif ifndef NO_PERF_READ_VDSOX32 @@ -693,7 +693,7 @@ sysconfdir = $(prefix)/etc ETC_PERFCONFIG = etc/perfconfig endif ifndef lib -ifeq ($(IS_X86_64),1) +ifeq ($(ARCH)$(IS_64_BIT), x861) lib = lib64 else lib = lib diff --git a/tools/perf/tests/make b/tools/perf/tests/make index 69a71ff84e01..f8b24a2c5a96 100644 --- a/tools/perf/tests/make +++ b/tools/perf/tests/make @@ -5,7 +5,7 @@ include config/Makefile.arch # FIXME looks like x86 is the only arch running tests ;-) # we need some IS_(32/64) flag to make this generic -ifeq ($(IS_X86_64),1) +ifeq 
($(ARCH)$(IS_64_BIT), x861) lib = lib64 else lib = lib diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c index 6da965bdbc2c..85b523885f9d 100644 --- a/tools/perf/util/cloexec.c +++ b/tools/perf/util/cloexec.c @@ -7,6 +7,12 @@ static unsigned long flag = PERF_FLAG_FD_CLOEXEC; +int __weak sched_getcpu(void) +{ + errno = ENOSYS; + return -1; +} + static int perf_flag_probe(void) { /* use 'safest' configuration as used in perf_evsel__fallback() */ diff --git a/tools/perf/util/cloexec.h b/tools/perf/util/cloexec.h index 94a5a7d829d5..68888c29b04a 100644 --- a/tools/perf/util/cloexec.h +++ b/tools/perf/util/cloexec.h @@ -3,4 +3,10 @@ unsigned long perf_event_open_cloexec_flag(void); +#ifdef __GLIBC_PREREQ +#if !__GLIBC_PREREQ(2, 6) +extern int sched_getcpu(void) __THROW; +#endif +#endif + #endif /* __PERF_CLOEXEC_H */ diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 06fcd1bf98b6..eafee111563b 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -69,6 +69,10 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym) return GELF_ST_TYPE(sym->st_info); } +#ifndef STT_GNU_IFUNC +#define STT_GNU_IFUNC 10 +#endif + static inline int elf_sym__is_function(const GElf_Sym *sym) { return (elf_sym__type(sym) == STT_FUNC || diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile index d1b3a361e526..4039854560d0 100644 --- a/tools/power/x86/turbostat/Makefile +++ b/tools/power/x86/turbostat/Makefile @@ -1,8 +1,12 @@ CC = $(CROSS_COMPILE)gcc -BUILD_OUTPUT := $(PWD) +BUILD_OUTPUT := $(CURDIR) PREFIX := /usr DESTDIR := +ifeq ("$(origin O)", "command line") + BUILD_OUTPUT := $(O) +endif + turbostat : turbostat.c CFLAGS += -Wall CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"' diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c index 2935405ad22f..b9d48e8e1eb4 100644 --- a/virt/kvm/arm/vgic-v2.c +++ b/virt/kvm/arm/vgic-v2.c @@ -72,6 +72,8 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, { if (!(lr_desc.state & LR_STATE_MASK)) vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr); + else + vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr); } static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) @@ -84,6 +86,11 @@ static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu) return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr; } +static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0; +} + static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu) { u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr; @@ -148,6 +155,7 @@ static const struct vgic_ops vgic_v2_ops = { .sync_lr_elrsr = vgic_v2_sync_lr_elrsr, .get_elrsr = vgic_v2_get_elrsr, .get_eisr = vgic_v2_get_eisr, + .clear_eisr = vgic_v2_clear_eisr, .get_interrupt_status = vgic_v2_get_interrupt_status, .enable_underflow = vgic_v2_enable_underflow, .disable_underflow = vgic_v2_disable_underflow, diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index 1c2c8eef0599..58b8af00ee4c 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c @@ -86,6 +86,8 @@ static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, { if (!(lr_desc.state & LR_STATE_MASK)) vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); + else + vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr); } static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu) @@ -98,6 +100,11 @@ static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu) return 
vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr; } +static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0; +} + static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu) { u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr; @@ -162,6 +169,7 @@ static const struct vgic_ops vgic_v3_ops = { .sync_lr_elrsr = vgic_v3_sync_lr_elrsr, .get_elrsr = vgic_v3_get_elrsr, .get_eisr = vgic_v3_get_eisr, + .clear_eisr = vgic_v3_clear_eisr, .get_interrupt_status = vgic_v3_get_interrupt_status, .enable_underflow = vgic_v3_enable_underflow, .disable_underflow = vgic_v3_disable_underflow, diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 03affc7bf453..57a16f4836d0 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -1219,6 +1219,11 @@ static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu) return vgic_ops->get_eisr(vcpu); } +static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu) +{ + vgic_ops->clear_eisr(vcpu); +} + static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu) { return vgic_ops->get_interrupt_status(vcpu); @@ -1258,6 +1263,7 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu) vgic_set_lr(vcpu, lr_nr, vlr); clear_bit(lr_nr, vgic_cpu->lr_used); vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; + vgic_sync_lr_elrsr(vcpu, lr_nr, vlr); } /* @@ -1313,6 +1319,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); vlr.state |= LR_STATE_PENDING; vgic_set_lr(vcpu, lr, vlr); + vgic_sync_lr_elrsr(vcpu, lr, vlr); return true; } } @@ -1334,6 +1341,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) vlr.state |= LR_EOI_INT; vgic_set_lr(vcpu, lr, vlr); + vgic_sync_lr_elrsr(vcpu, lr, vlr); return true; } @@ -1502,6 +1510,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) if (status & INT_STATUS_UNDERFLOW) vgic_disable_underflow(vcpu); + /* + * In the next iterations of the vcpu loop, if we sync the vgic state + * after flushing it, but before entering the guest (this happens for + * pending signals and vmid rollovers), then make sure we don't pick + * up any old maintenance interrupts here. + */ + vgic_clear_eisr(vcpu); + return level_pending; } @@ -1706,6 +1722,9 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, goto out; } + if (irq_num >= kvm->arch.vgic.nr_irqs) + return -EINVAL; + vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level); if (vcpu_id >= 0) { /* kick the specified vcpu */ @@ -1809,7 +1828,7 @@ static int vgic_init(struct kvm *kvm) nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus); if (!nr_cpus) /* No vcpus? Can't be good... */ - return -EINVAL; + return -ENODEV; /* * If nobody configured the number of interrupts, use the diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 0fba701bc518..0edccc8f6ffb 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1608,8 +1608,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, ghc->generation = slots->generation; ghc->len = len; ghc->memslot = gfn_to_memslot(kvm, start_gfn); - ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail); - if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) { + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL); + if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) { ghc->hva += offset; } else { /*