diff --git a/Makefile b/Makefile
index 9dc2aec1c2e5..d7b64830a7b7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 11
-SUBLEVEL = 1
+SUBLEVEL = 2
 EXTRAVERSION =
 NAME = Fearless Coyote
 
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index c2b131527a64..a08d7a93aebb 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -208,9 +208,10 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
-	int ret = 1;
+	struct kvm *kvm = vcpu->kvm;
 	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
+	int ret = 1;
 
 	switch (psci_fn) {
 	case PSCI_0_2_FN_PSCI_VERSION:
@@ -230,7 +231,9 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		break;
 	case PSCI_0_2_FN_CPU_ON:
 	case PSCI_0_2_FN64_CPU_ON:
+		mutex_lock(&kvm->lock);
 		val = kvm_psci_vcpu_on(vcpu);
+		mutex_unlock(&kvm->lock);
 		break;
 	case PSCI_0_2_FN_AFFINITY_INFO:
 	case PSCI_0_2_FN64_AFFINITY_INFO:
@@ -279,6 +282,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
@@ -288,7 +292,9 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 		val = PSCI_RET_SUCCESS;
 		break;
 	case KVM_PSCI_FN_CPU_ON:
+		mutex_lock(&kvm->lock);
 		val = kvm_psci_vcpu_on(vcpu);
+		mutex_unlock(&kvm->lock);
 		break;
 	default:
 		val = PSCI_RET_NOT_SUPPORTED;
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f5ea0ba70f07..fe39e6841326 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -240,6 +240,12 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
+static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+{
+	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+}
+
 static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 {
 	return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 0e26f8c2b56f..79168b38eeba 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1638,8 +1638,8 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 {
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
-	int Rt = (hsr >> 5) & 0xf;
-	int Rt2 = (hsr >> 10) & 0xf;
+	int Rt = kvm_vcpu_sys_get_rt(vcpu);
+	int Rt2 = (hsr >> 10) & 0x1f;
 
 	params.is_aarch32 = true;
 	params.is_32bit = false;
@@ -1690,7 +1690,7 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 {
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
-	int Rt = (hsr >> 5) & 0xf;
+	int Rt = kvm_vcpu_sys_get_rt(vcpu);
 
 	params.is_aarch32 = true;
 	params.is_32bit = true;
@@ -1805,7 +1805,7 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct sys_reg_params params;
 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
-	int Rt = (esr >> 5) & 0x1f;
+	int Rt = kvm_vcpu_sys_get_rt(vcpu);
 	int ret;
 
 	trace_kvm_handle_sys_reg(esr);
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index d5e2b8309939..021db31b40ba 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -561,6 +561,7 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
 static struct pstore_info nvram_pstore_info = {
 	.owner = THIS_MODULE,
 	.name = "nvram",
+	.flags = PSTORE_FLAGS_DMESG,
 	.open = nvram_pstore_open,
 	.read = nvram_pstore_read,
 	.write = nvram_pstore_write,
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 9b42b6d1e902..ef5a9cc66fb8 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -16,7 +16,7 @@
 #ifndef BOOT_BOOT_H
 #define BOOT_BOOT_H
 
-#define STACK_SIZE	512	/* Minimum number of bytes for stack */
+#define STACK_SIZE	1024	/* Minimum number of bytes for stack */
 
 #ifndef __ASSEMBLY__
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 9d05c7e67f60..a45e2114a846 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -761,7 +761,7 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
 
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,   hsw_rapl_init),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,   hsw_rapl_init),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,	  hsw_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,	  hsx_rapl_init),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
 
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 529bb4a6487a..e2904373010d 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -103,7 +103,7 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 
 	if (bytes < 8) {
 		if (!IS_ALIGNED(dest, 4) || (bytes != 4))
-			arch_wb_cache_pmem(addr, 1);
+			arch_wb_cache_pmem(addr, bytes);
 	} else {
 		if (!IS_ALIGNED(dest, 8)) {
 			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bad6a25067bc..9fa5b8164961 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -529,14 +529,16 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 
 static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
 {
-	return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, &val,
-					   sizeof(val));
+
+	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
+				      sizeof(val));
 }
 
 static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
 {
-	return kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, val,
-					  sizeof(*val));
+
+	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
+				     sizeof(*val));
 }
 
 static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
@@ -2285,8 +2287,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
 	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
 		return;
 
-	if (kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
-				       sizeof(u32)))
+	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+				  sizeof(u32)))
 		return;
 
 	apic_set_tpr(vcpu->arch.apic, data & 0xff);
@@ -2338,14 +2340,14 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
 		max_isr = 0;
 	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
 
-	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
-				    sizeof(u32));
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+			       sizeof(u32));
 }
 
 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
 {
 	if (vapic_addr) {
-		if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
 					&vcpu->arch.apic->vapic_cache,
 					vapic_addr, sizeof(u32)))
 			return -EINVAL;
@@ -2439,7 +2441,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
 	vcpu->arch.pv_eoi.msr_val = data;
 	if (!pv_eoi_enabled(vcpu))
 		return 0;
-	return kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.pv_eoi.data,
+	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
 					      addr, sizeof(u8));
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ccbd45ecd41a..421a069b5429 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1813,7 +1813,7 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	struct pvclock_vcpu_time_info guest_hv_clock;
 
-	if (unlikely(kvm_vcpu_read_guest_cached(v, &vcpu->pv_time,
+	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
 		&guest_hv_clock, sizeof(guest_hv_clock))))
 		return;
 
@@ -1834,9 +1834,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
 	vcpu->hv_clock.version = guest_hv_clock.version + 1;
-	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
-				    &vcpu->hv_clock,
-				    sizeof(vcpu->hv_clock.version));
+	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+				&vcpu->hv_clock,
+				sizeof(vcpu->hv_clock.version));
 
 	smp_wmb();
 
@@ -1850,16 +1850,16 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 
 	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
 
-	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
-				    &vcpu->hv_clock,
-				    sizeof(vcpu->hv_clock));
+	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+				&vcpu->hv_clock,
+				sizeof(vcpu->hv_clock));
 
 	smp_wmb();
 
 	vcpu->hv_clock.version++;
-	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
-				    &vcpu->hv_clock,
-				    sizeof(vcpu->hv_clock.version));
+	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+				&vcpu->hv_clock,
+				sizeof(vcpu->hv_clock.version));
 }
 
 static int kvm_guest_time_update(struct kvm_vcpu *v)
@@ -2092,7 +2092,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 		return 0;
 	}
 
-	if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.apf.data, gpa,
+	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
 					sizeof(u32)))
 		return 1;
 
@@ -2111,7 +2111,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
-	if (unlikely(kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.st.stime,
+	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
 		return;
 
@@ -2122,7 +2122,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.st.steal.version += 1;
 
-	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 
 	smp_wmb();
@@ -2131,14 +2131,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 		vcpu->arch.st.last_steal;
 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
 
-	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 
 	smp_wmb();
 
 	vcpu->arch.st.steal.version += 1;
 
-	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
@@ -2243,7 +2243,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!(data & 1))
 			break;
 
-		if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
 		     &vcpu->arch.pv_time, data & ~1ULL,
 		     sizeof(struct pvclock_vcpu_time_info)))
 			vcpu->arch.pv_time_enabled = false;
@@ -2264,7 +2264,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (data & KVM_STEAL_RESERVED_MASK)
 			return 1;
 
-		if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.st.stime,
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
 					data & KVM_STEAL_VALID_BITS,
 					sizeof(struct kvm_steal_time)))
 			return 1;
 
@@ -2878,7 +2878,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.st.steal.preempted = 1;
 
-	kvm_vcpu_write_guest_offset_cached(vcpu, &vcpu->arch.st.stime,
+	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
 			&vcpu->arch.st.steal.preempted,
 			offsetof(struct kvm_steal_time, preempted),
 			sizeof(vcpu->arch.st.steal.preempted));
@@ -3127,6 +3127,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
 		return -EINVAL;
 
+	/* INITs are latched while in SMM */
+	if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
+	    (events->smi.smm || events->smi.pending) &&
+	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
+		return -EINVAL;
+
 	process_nmi(vcpu);
 	vcpu->arch.exception.pending = events->exception.injected;
 	vcpu->arch.exception.nr = events->exception.nr;
@@ -7355,6 +7361,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
 		return -EINVAL;
 
+	/* INITs are latched while in SMM */
+	if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
+	    (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
+	     mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
+		return -EINVAL;
+
 	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
 		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
 		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
@@ -8536,8 +8548,9 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 
 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
 {
-	return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apf.data, &val,
-					   sizeof(val));
+
+	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
+				      sizeof(val));
 }
 
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index a5c9910d234f..09a085bde0d4 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -125,7 +125,7 @@ int poke_user(struct task_struct *child, long addr, long data)
 	else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
 		(addr <= offsetof(struct user, u_debugreg[7]))) {
 		addr -= offsetof(struct user, u_debugreg[0]);
-		addr = addr >> 2;
+		addr = addr >> 3;
 		if ((addr == 4) || (addr == 5))
 			return -EIO;
 		child->thread.arch.debugregs[addr] = data;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 37cb5aad71de..07b13e26215e 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2023,7 +2023,8 @@ static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
 
 /*
  * Translate a virtual address to a physical one without relying on mapped
- * page tables.
+ * page tables. Don't rely on big pages being aligned in (guest) physical
+ * space!
  */
 static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
 {
@@ -2044,7 +2045,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
 		       sizeof(pud)));
 	if (!pud_present(pud))
 		return 0;
-	pa = pud_pfn(pud) << PAGE_SHIFT;
+	pa = pud_val(pud) & PTE_PFN_MASK;
 	if (pud_large(pud))
 		return pa + (vaddr & ~PUD_MASK);
 
@@ -2052,7 +2053,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
 		       sizeof(pmd)));
 	if (!pmd_present(pmd))
 		return 0;
-	pa = pmd_pfn(pmd) << PAGE_SHIFT;
+	pa = pmd_val(pmd) & PTE_PFN_MASK;
 	if (pmd_large(pmd))
 		return pa + (vaddr & ~PMD_MASK);
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 35c5af1ea068..e4ebd79de679 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -412,7 +412,8 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 
 	bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
 		template->flags;
-	bi->interval_exp = ilog2(queue_logical_block_size(disk->queue));
+	bi->interval_exp = template->interval_exp ? :
+		ilog2(queue_logical_block_size(disk->queue));
 	bi->profile = template->profile ? template->profile : &nop_profile;
 	bi->tuple_size = template->tuple_size;
 	bi->tag_size = template->tag_size;
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index ef59d9926ee9..8af664f7d27c 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -45,6 +45,11 @@ struct aead_async_req {
 	char iv[];
 };
 
+struct aead_tfm {
+	struct crypto_aead *aead;
+	bool has_key;
+};
+
 struct aead_ctx {
 	struct aead_sg_list tsgl;
 	struct aead_async_rsgl first_rsgl;
@@ -723,24 +728,146 @@ static struct proto_ops algif_aead_ops = {
 	.poll		=	aead_poll,
 };
 
+static int aead_check_key(struct socket *sock)
+{
+	int err = 0;
+	struct sock *psk;
+	struct alg_sock *pask;
+	struct aead_tfm *tfm;
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+
+	lock_sock(sk);
+	if (ask->refcnt)
+		goto unlock_child;
+
+	psk = ask->parent;
+	pask = alg_sk(ask->parent);
+	tfm = pask->private;
+
+	err = -ENOKEY;
+	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
+	if (!tfm->has_key)
+		goto unlock;
+
+	if (!pask->refcnt++)
+		sock_hold(psk);
+
+	ask->refcnt = 1;
+	sock_put(psk);
+
+	err = 0;
+
+unlock:
+	release_sock(psk);
+unlock_child:
+	release_sock(sk);
+
+	return err;
+}
+
+static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
+			      size_t size)
+{
+	int err;
+
+	err = aead_check_key(sock);
+	if (err)
+		return err;
+
+	return aead_sendmsg(sock, msg, size);
+}
+
+static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
+				   int offset, size_t size, int flags)
+{
+	int err;
+
+	err = aead_check_key(sock);
+	if (err)
+		return err;
+
+	return aead_sendpage(sock, page, offset, size, flags);
+}
+
+static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
+			      size_t ignored, int flags)
+{
+	int err;
+
+	err = aead_check_key(sock);
+	if (err)
+		return err;
+
+	return aead_recvmsg(sock, msg, ignored, flags);
+}
+
+static struct proto_ops algif_aead_ops_nokey = {
+	.family		=	PF_ALG,
+
+	.connect	=	sock_no_connect,
+	.socketpair	=	sock_no_socketpair,
+	.getname	=	sock_no_getname,
+	.ioctl		=	sock_no_ioctl,
+	.listen		=	sock_no_listen,
+	.shutdown	=	sock_no_shutdown,
+	.getsockopt	=	sock_no_getsockopt,
+	.mmap		=	sock_no_mmap,
+	.bind		=	sock_no_bind,
+	.accept		=	sock_no_accept,
+	.setsockopt	=	sock_no_setsockopt,
+
+	.release	=	af_alg_release,
+	.sendmsg	=	aead_sendmsg_nokey,
+	.sendpage	=	aead_sendpage_nokey,
+	.recvmsg	=	aead_recvmsg_nokey,
+	.poll		=	aead_poll,
+};
+
 static void *aead_bind(const char *name, u32 type, u32 mask)
 {
-	return crypto_alloc_aead(name, type, mask);
+	struct aead_tfm *tfm;
+	struct crypto_aead *aead;
+
+	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+	if (!tfm)
+		return ERR_PTR(-ENOMEM);
+
+	aead = crypto_alloc_aead(name, type, mask);
+	if (IS_ERR(aead)) {
+		kfree(tfm);
+		return ERR_CAST(aead);
+	}
+
+	tfm->aead = aead;
+
+	return tfm;
 }
 
 static void aead_release(void *private)
 {
-	crypto_free_aead(private);
+	struct aead_tfm *tfm = private;
+
+	crypto_free_aead(tfm->aead);
+	kfree(tfm);
 }
 
 static int aead_setauthsize(void *private, unsigned int authsize)
 {
-	return crypto_aead_setauthsize(private, authsize);
+	struct aead_tfm *tfm = private;
+
+	return crypto_aead_setauthsize(tfm->aead, authsize);
 }
 
 static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
 {
-	return crypto_aead_setkey(private, key, keylen);
+	struct aead_tfm *tfm = private;
+	int err;
+
+	err = crypto_aead_setkey(tfm->aead, key, keylen);
+	tfm->has_key = !err;
+
+	return err;
 }
 
 static void aead_sock_destruct(struct sock *sk)
@@ -757,12 +884,14 @@ static void aead_sock_destruct(struct sock *sk)
 	af_alg_release_parent(sk);
 }
 
-static int aead_accept_parent(void *private, struct sock *sk)
+static int aead_accept_parent_nokey(void *private, struct sock *sk)
 {
 	struct aead_ctx *ctx;
 	struct alg_sock *ask = alg_sk(sk);
-	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
-	unsigned int ivlen = crypto_aead_ivsize(private);
+	struct aead_tfm *tfm = private;
+	struct crypto_aead *aead = tfm->aead;
+	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
+	unsigned int ivlen = crypto_aead_ivsize(aead);
 
 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
 	if (!ctx)
@@ -789,7 +918,7 @@ static int aead_accept_parent(void *private, struct sock *sk)
 
 	ask->private = ctx;
 
-	aead_request_set_tfm(&ctx->aead_req, private);
+	aead_request_set_tfm(&ctx->aead_req, aead);
 	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				  af_alg_complete, &ctx->completion);
 
@@ -798,13 +927,25 @@ static int aead_accept_parent(void *private, struct sock *sk)
 	return 0;
 }
 
+static int aead_accept_parent(void *private, struct sock *sk)
+{
+	struct aead_tfm *tfm = private;
+
+	if (!tfm->has_key)
+		return -ENOKEY;
+
+	return aead_accept_parent_nokey(private, sk);
+}
+
 static const struct af_alg_type algif_type_aead = {
 	.bind		=	aead_bind,
 	.release	=	aead_release,
 	.setkey		=	aead_setkey,
 	.setauthsize	=	aead_setauthsize,
 	.accept		=	aead_accept_parent,
+	.accept_nokey	=	aead_accept_parent_nokey,
 	.ops		=	&algif_aead_ops,
+	.ops_nokey	=	&algif_aead_ops_nokey,
 	.name		=	"aead",
 	.owner		=	THIS_MODULE
 };
diff --git a/drivers/Makefile b/drivers/Makefile
index 2eced9afba53..8f8bdc9e3d29 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -104,6 +104,7 @@ obj-$(CONFIG_USB_PHY)		+= usb/
 obj-$(CONFIG_USB)		+= usb/
 obj-$(CONFIG_PCI)		+= usb/
 obj-$(CONFIG_USB_GADGET)	+= usb/
+obj-$(CONFIG_OF)		+= usb/
 obj-$(CONFIG_SERIO)		+= input/serio/
 obj-$(CONFIG_GAMEPORT)		+= input/gameport/
 obj-$(CONFIG_INPUT)		+= input/
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1ac70744ae7b..50f56d066936 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3462,6 +3462,14 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
 	if (unlikely(!dev->dma_mode))
 		goto invalid_opcode;
 
+	/*
+	 * We only allow sending this command through the block layer,
+	 * as it modifies the DATA OUT buffer, which would corrupt user
+	 * memory for SG_IO commands.
+	 */
+	if (unlikely(blk_rq_is_passthrough(scmd->request)))
+		goto invalid_opcode;
+
 	if (unlikely(scmd->cmd_len < 16)) {
 		fp = 15;
 		goto invalid_fld;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 5262a2077d7a..11f30e5cec2c 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -287,6 +287,9 @@ static int bcm_open(struct hci_uart *hu)
 
 	hu->priv = bcm;
 
+	if (!hu->tty->dev)
+		goto out;
+
 	mutex_lock(&bcm_device_lock);
 	list_for_each(p, &bcm_device_list) {
 		struct bcm_device *dev = list_entry(p, struct bcm_device, list);
@@ -307,7 +310,7 @@ static int bcm_open(struct hci_uart *hu)
 	}
 
 	mutex_unlock(&bcm_device_lock);
-
+out:
 	return 0;
 }
 
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 9e271286c5e5..73306384af6c 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -307,6 +307,9 @@ static int intel_set_power(struct hci_uart *hu, bool powered)
 	struct list_head *p;
 	int err = -ENODEV;
 
+	if (!hu->tty->dev)
+		return err;
+
 	mutex_lock(&intel_device_list_lock);
 
 	list_for_each(p, &intel_device_list) {
@@ -379,6 +382,9 @@ static void intel_busy_work(struct work_struct *work)
 	struct intel_data *intel = container_of(work, struct intel_data,
 						busy_work);
 
+	if (!intel->hu->tty->dev)
+		return;
+
 	/* Link is busy, delay the suspend */
 	mutex_lock(&intel_device_list_lock);
 	list_for_each(p, &intel_device_list) {
@@ -889,6 +895,8 @@ static int intel_setup(struct hci_uart *hu)
 	list_for_each(p, &intel_device_list) {
 		struct intel_device *dev = list_entry(p, struct intel_device, list);
 
+		if (!hu->tty->dev)
+			break;
 		if (hu->tty->dev->parent == dev->pdev->dev.parent) {
 			if (device_may_wakeup(&dev->pdev->dev)) {
 				set_bit(STATE_LPM_ENABLED, &intel->flags);
@@ -1056,6 +1064,9 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 
 	BT_DBG("hu %p skb %p", hu, skb);
 
+	if (!hu->tty->dev)
+		goto out_enqueue;
+
 	/* Be sure our controller is resumed and potential LPM transaction
 	 * completed before enqueuing any packet.
 	 */
@@ -1072,7 +1083,7 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 		}
 	}
 	mutex_unlock(&intel_device_list_lock);
-
+out_enqueue:
 	skb_queue_tail(&intel->txq, skb);
 
 	return 0;
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index cca6e5bc1cea..51ba67de862e 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -891,6 +891,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
 		 * for details on the intricacies of this.
 		 */
 		int left;
+		unsigned char *data_to_send;
 
 		ssif_inc_stat(ssif_info, sent_messages_parts);
 
@@ -899,6 +900,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
 			left = 32;
 		/* Length byte. */
 		ssif_info->multi_data[ssif_info->multi_pos] = left;
+		data_to_send = ssif_info->multi_data + ssif_info->multi_pos;
 		ssif_info->multi_pos += left;
 		if (left < 32)
 			/*
@@ -912,7 +914,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
 		rv = ssif_i2c_send(ssif_info, msg_written_handler,
 				   I2C_SMBUS_WRITE,
 				   SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
-				   ssif_info->multi_data + ssif_info->multi_pos,
+				   data_to_send,
 				   I2C_SMBUS_BLOCK_DATA);
 		if (rv < 0) {
 			/* request failed, just return the error.
			 */
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 7bc09989e18a..c46eeda71595 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -315,17 +315,73 @@ static int ccp_perform_ecc(struct ccp_op *op)
 	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
 }
 
+static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
+{
+	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
+{
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_irq_bh(unsigned long data)
+{
+	struct ccp_device *ccp = (struct ccp_device *)data;
+	struct ccp_cmd_queue *cmd_q;
+	u32 q_int, status;
+	unsigned int i;
+
+	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+
+		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+		if (q_int) {
+			cmd_q->int_status = status;
+			cmd_q->q_status = ioread32(cmd_q->reg_status);
+			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+			/* On error, only save the first error value */
+			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+			cmd_q->int_rcvd = 1;
+
+			/* Acknowledge the interrupt and wake the kthread */
+			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+			wake_up_interruptible(&cmd_q->int_queue);
+		}
+	}
+	ccp_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+	struct device *dev = data;
+	struct ccp_device *ccp = dev_get_drvdata(dev);
+
+	ccp_disable_queue_interrupts(ccp);
+	if (ccp->use_tasklet)
+		tasklet_schedule(&ccp->irq_tasklet);
+	else
+		ccp_irq_bh((unsigned long)ccp);
+
+	return IRQ_HANDLED;
+}
+
 static int ccp_init(struct ccp_device *ccp)
 {
 	struct device *dev = ccp->dev;
 	struct ccp_cmd_queue *cmd_q;
 	struct dma_pool *dma_pool;
 	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
-	unsigned int qmr, qim, i;
+	unsigned int qmr, i;
 	int ret;
 
 	/* Find available queues */
-	qim = 0;
+	ccp->qim = 0;
 	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
 	for (i = 0; i < MAX_HW_QUEUES; i++) {
 		if (!(qmr & (1 << i)))
@@ -370,7 +426,7 @@ static int ccp_init(struct ccp_device *ccp)
 		init_waitqueue_head(&cmd_q->int_queue);
 
 		/* Build queue interrupt mask (two interrupts per queue) */
-		qim |= cmd_q->int_ok | cmd_q->int_err;
+		ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
 
 #ifdef CONFIG_ARM64
 		/* For arm64 set the recommended queue cache settings */
@@ -388,14 +444,14 @@ static int ccp_init(struct ccp_device *ccp)
 	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
 
 	/* Disable and clear interrupts until ready */
-	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+	ccp_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 	}
-	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
 
 	/* Request an irq */
 	ret = ccp->get_irq(ccp);
@@ -404,6 +460,11 @@ static int ccp_init(struct ccp_device *ccp)
 		goto e_pool;
 	}
 
+	/* Initialize the ISR tasklet? */
+	if (ccp->use_tasklet)
+		tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
+			     (unsigned long)ccp);
+
 	dev_dbg(dev, "Starting threads...\n");
 	/* Create a kthread for each queue */
 	for (i = 0; i < ccp->cmd_q_count; i++) {
@@ -426,7 +487,7 @@ static int ccp_init(struct ccp_device *ccp)
 
 	dev_dbg(dev, "Enabling interrupts...\n");
 	/* Enable interrupts */
-	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+	ccp_enable_queue_interrupts(ccp);
 
 	dev_dbg(dev, "Registering device...\n");
 	ccp_add_device(ccp);
@@ -463,7 +524,7 @@ static void ccp_destroy(struct ccp_device *ccp)
 {
 	struct ccp_cmd_queue *cmd_q;
 	struct ccp_cmd *cmd;
-	unsigned int qim, i;
+	unsigned int i;
 
 	/* Unregister the DMA engine */
 	ccp_dmaengine_unregister(ccp);
@@ -474,22 +535,15 @@ static void ccp_destroy(struct ccp_device *ccp)
 	/* Remove this device from the list of available units */
 	ccp_del_device(ccp);
 
-	/* Build queue interrupt mask (two interrupt masks per queue) */
-	qim = 0;
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-		qim |= cmd_q->int_ok | cmd_q->int_err;
-	}
-
 	/* Disable and clear interrupts */
-	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+	ccp_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 	}
-	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
 
 	/* Stop the queue kthreads */
 	for (i = 0; i < ccp->cmd_q_count; i++)
@@ -516,40 +570,6 @@ static void ccp_destroy(struct ccp_device *ccp)
 	}
 }
 
-static irqreturn_t ccp_irq_handler(int irq, void *data)
-{
-	struct device *dev = data;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
-	struct ccp_cmd_queue *cmd_q;
-	u32 q_int, status;
-	unsigned int i;
-
-	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
-
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-
-		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
-		if (q_int) {
-			cmd_q->int_status = status;
-			cmd_q->q_status = ioread32(cmd_q->reg_status);
-			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
-			/* On error, only save the first error value */
-			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
-				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
-			cmd_q->int_rcvd = 1;
-
-			/* Acknowledge the interrupt and wake the kthread */
-			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
-			wake_up_interruptible(&cmd_q->int_queue);
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
 static const struct ccp_actions ccp3_actions = {
 	.aes = ccp_perform_aes,
 	.xts_aes = ccp_perform_xts_aes,
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index fc08b4ed69d9..4e2b01091715 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -653,6 +653,65 @@ static int ccp_assign_lsbs(struct ccp_device *ccp)
 	return rc;
 }
 
+static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
+}
+
+static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
+}
+
+static void ccp5_irq_bh(unsigned long data)
+{
+	struct ccp_device *ccp = (struct ccp_device *)data;
+	u32 status;
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+
+		status = ioread32(cmd_q->reg_interrupt_status);
+
+		if (status) {
+			cmd_q->int_status = status;
+			cmd_q->q_status = ioread32(cmd_q->reg_status);
+			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+			/* On error, only save the first error value */
+			if ((status & INT_ERROR) && !cmd_q->cmd_error)
+				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+			cmd_q->int_rcvd = 1;
+
+			/* Acknowledge the interrupt and wake the kthread */
+			iowrite32(status, cmd_q->reg_interrupt_status);
+			wake_up_interruptible(&cmd_q->int_queue);
+		}
+	}
+	ccp5_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp5_irq_handler(int irq, void *data)
+{
+	struct device *dev = data;
+	struct ccp_device *ccp = dev_get_drvdata(dev);
+
+	ccp5_disable_queue_interrupts(ccp);
+	if (ccp->use_tasklet)
+		tasklet_schedule(&ccp->irq_tasklet);
+	else
+		ccp5_irq_bh((unsigned long)ccp);
+	return IRQ_HANDLED;
+}
+
 static int ccp5_init(struct ccp_device *ccp)
 {
 	struct device *dev = ccp->dev;
@@ -736,19 +795,18 @@ static int ccp5_init(struct ccp_device *ccp)
 	}
 
 	/* Turn off the queues and disable interrupts until ready */
+	ccp5_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		cmd_q->qcontrol = 0; /* Start with nothing */
 		iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
 
-		/* Disable the interrupts */
-		iowrite32(0x00, cmd_q->reg_int_enable);
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 
-		/* Clear the interrupts */
-		iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
+		/* Clear the interrupt status */
+		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
 	}
 
 	dev_dbg(dev, "Requesting an IRQ...\n");
@@ -758,6 +816,10 @@ static int ccp5_init(struct ccp_device *ccp)
 		dev_err(dev, "unable to allocate an IRQ\n");
 		goto e_pool;
 	}
+	/* Initialize the ISR tasklet */
+	if (ccp->use_tasklet)
+		tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
+			     (unsigned long)ccp);
 
 	dev_dbg(dev, "Loading LSB map...\n");
 	/* Copy the private LSB mask to the public registers */
@@ -826,11 +888,7 @@ static int ccp5_init(struct ccp_device *ccp)
 	}
 
 	dev_dbg(dev, "Enabling interrupts...\n");
-	/* Enable interrupts */
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-		iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable);
-	}
+	ccp5_enable_queue_interrupts(ccp);
 
 	dev_dbg(dev, "Registering device...\n");
 	/* Put this on the unit list to make it available */
@@ -882,17 +940,15 @@ static void ccp5_destroy(struct ccp_device *ccp)
 	ccp_del_device(ccp);
 
 	/* Disable and clear interrupts */
+	ccp5_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		/* Turn off the run bit */
 		iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
 
-		/* Disable the interrupts */
-		iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
-
 		/* Clear the interrupt status */
-		iowrite32(0x00, cmd_q->reg_int_enable);
+		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 	}
@@ -925,38 +981,6 @@ static void ccp5_destroy(struct ccp_device *ccp)
 	}
 }
 
-static irqreturn_t ccp5_irq_handler(int irq, void *data)
-{
-	struct device *dev = data;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
-	u32 status;
-	unsigned int i;
-
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
-
-		status = ioread32(cmd_q->reg_interrupt_status);
-
-		if (status) {
-			cmd_q->int_status = status;
-			cmd_q->q_status = ioread32(cmd_q->reg_status);
-			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
-			/* On error, only save the first error value */
-			if ((status & INT_ERROR) && !cmd_q->cmd_error)
-				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
-			cmd_q->int_rcvd = 1;
-
-			/* Acknowledge the interrupt and wake the kthread */
-			iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
-			wake_up_interruptible(&cmd_q->int_queue);
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
 static void ccp5_config(struct ccp_device *ccp)
 {
 	/* Public side */
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index aa36f3f81860..6bb60e11b0e6 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -109,9 +109,8 @@
 #define INT_COMPLETION			0x1
 #define INT_ERROR			0x2
 #define INT_QUEUE_STOPPED		0x4
-#define	ALL_INTERRUPTS			(INT_COMPLETION| \
-					 INT_ERROR| \
-					 INT_QUEUE_STOPPED)
+#define INT_EMPTY_QUEUE			0x8
+#define SUPPORTED_INTERRUPTS		(INT_COMPLETION | INT_ERROR)
 
 #define LSB_REGION_WIDTH		5
 #define MAX_LSB_CNT			8
@@ -337,7 +336,10 @@ struct ccp_device {
 	void *dev_specific;
 	int (*get_irq)(struct ccp_device *ccp);
 	void (*free_irq)(struct ccp_device *ccp);
+	unsigned int qim;
 	unsigned int irq;
+	bool use_tasklet;
+	struct tasklet_struct irq_tasklet;
 
 	/* I/O area used for device communication. The register mapping
 	 * starts at an offset into the mapped bar.
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 28a9996c1085..e880d4cf4ada 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -69,6 +69,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
 			goto e_irq;
 		}
 	}
+	ccp->use_tasklet = true;
 
 	return 0;
 
@@ -100,6 +101,7 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
 		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
 		goto e_msi;
 	}
+	ccp->use_tasklet = true;
 
 	return 0;
 
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 1b9da3dc799b..6c620487e9c2 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -287,7 +287,6 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
 static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
 {
 	dev->req->base.complete(&dev->req->base, err);
-	dev->busy = false;
 }
 
 static void s5p_unset_outdata(struct s5p_aes_dev *dev)
@@ -462,7 +461,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
 		spin_unlock_irqrestore(&dev->lock, flags);
 
 		s5p_aes_complete(dev, 0);
-		dev->busy = true;
+		/* Device is still busy */
 		tasklet_schedule(&dev->tasklet);
 	} else {
 		/*
@@ -483,6 +482,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
 
 error:
 	s5p_sg_done(dev);
+	dev->busy = false;
 	spin_unlock_irqrestore(&dev->lock, flags);
 	s5p_aes_complete(dev, err);
 
@@ -634,6 +634,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 
 indata_error:
 	s5p_sg_done(dev);
+	dev->busy = false;
 	spin_unlock_irqrestore(&dev->lock, flags);
 	s5p_aes_complete(dev, err);
 }
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 806f180c80d8..f71ececd2678 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -77,36 +77,27 @@ struct dax_dev {
 	struct resource res[0];
 };
 
+/*
+ * Rely on the fact that drvdata is set before the attributes are
+ * registered, and that the attributes are unregistered before drvdata
+ * is cleared to assume that drvdata is always valid.
+ */
 static ssize_t id_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct dax_region *dax_region;
-	ssize_t rc = -ENXIO;
+	struct dax_region *dax_region = dev_get_drvdata(dev);
 
-	device_lock(dev);
-	dax_region = dev_get_drvdata(dev);
-	if (dax_region)
-		rc = sprintf(buf, "%d\n", dax_region->id);
-	device_unlock(dev);
-
-	return rc;
+	return sprintf(buf, "%d\n", dax_region->id);
 }
 static DEVICE_ATTR_RO(id);
 
 static ssize_t region_size_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct dax_region *dax_region;
-	ssize_t rc = -ENXIO;
+	struct dax_region *dax_region = dev_get_drvdata(dev);
 
-	device_lock(dev);
-	dax_region = dev_get_drvdata(dev);
-	if (dax_region)
-		rc = sprintf(buf, "%llu\n", (unsigned long long)
-				resource_size(&dax_region->res));
-	device_unlock(dev);
-
-	return rc;
+	return sprintf(buf, "%llu\n", (unsigned long long)
+			resource_size(&dax_region->res));
 }
 static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
 		region_size_show, NULL);
@@ -114,16 +105,9 @@ static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
 static ssize_t align_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct dax_region *dax_region;
-	ssize_t rc = -ENXIO;
+	struct dax_region *dax_region = dev_get_drvdata(dev);
 
-	device_lock(dev);
-	dax_region = dev_get_drvdata(dev);
-	if (dax_region)
-		rc = sprintf(buf, "%u\n", dax_region->align);
-	device_unlock(dev);
-
-	return rc;
+	return sprintf(buf, "%u\n", dax_region->align);
 }
 static DEVICE_ATTR_RO(align);
 
@@ -703,13 +687,10 @@ static void dax_dev_release(struct device *dev)
 	kfree(dax_dev);
 }
 
-static void unregister_dax_dev(void *dev)
+static void kill_dax_dev(struct dax_dev *dax_dev)
 {
-	struct dax_dev *dax_dev = to_dax_dev(dev);
 	struct cdev *cdev = &dax_dev->cdev;
 
-	dev_dbg(dev, "%s\n", __func__);
-
 	/*
 	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
 	 * ensuring that any fault handlers that might have seen
@@ -721,6 +702,15 @@ static void unregister_dax_dev(void *dev)
 	synchronize_srcu(&dax_srcu);
 	unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
 	cdev_del(cdev);
+}
+
+static void unregister_dax_dev(void *dev)
+{
+	struct dax_dev *dax_dev = to_dax_dev(dev);
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	kill_dax_dev(dax_dev);
 	device_unregister(dev);
 }
 
@@ -797,6 +787,7 @@ struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
 	dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
 	rc = device_add(dev);
 	if (rc) {
+		kill_dax_dev(dax_dev);
 		put_device(dev);
 		return ERR_PTR(rc);
 	}
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 7c9e34d679d3..81d447da0048 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -172,8 +172,16 @@ static void ib_device_release(struct device *device)
 {
 	struct ib_device *dev = container_of(device, struct ib_device, dev);
 
-	ib_cache_release_one(dev);
-	kfree(dev->port_immutable);
+	WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
+	if (dev->reg_state == IB_DEV_UNREGISTERED) {
+		/*
+		 * In IB_DEV_UNINITIALIZED state, cache or port table
+		 * is not even created. Free cache and port table only when
+		 * device reaches UNREGISTERED state.
+		 */
+		ib_cache_release_one(dev);
+		kfree(dev->port_immutable);
+	}
 	kfree(dev);
 }
 
@@ -380,32 +388,27 @@ int ib_register_device(struct ib_device *device,
 	ret = ib_cache_setup_one(device);
 	if (ret) {
 		pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
-		goto out;
+		goto port_cleanup;
 	}
 
 	ret = ib_device_register_rdmacg(device);
 	if (ret) {
 		pr_warn("Couldn't register device with rdma cgroup\n");
-		ib_cache_cleanup_one(device);
-		goto out;
+		goto cache_cleanup;
 	}
 
 	memset(&device->attrs, 0, sizeof(device->attrs));
 	ret = device->query_device(device, &device->attrs, &uhw);
 	if (ret) {
 		pr_warn("Couldn't query the device attributes\n");
-		ib_device_unregister_rdmacg(device);
-		ib_cache_cleanup_one(device);
-		goto out;
+		goto cache_cleanup;
 	}
 
 	ret = ib_device_register_sysfs(device, port_callback);
 	if (ret) {
 		pr_warn("Couldn't register device %s with driver model\n",
 			device->name);
-		ib_device_unregister_rdmacg(device);
-		ib_cache_cleanup_one(device);
-		goto out;
+		goto cache_cleanup;
 	}
 
 	device->reg_state = IB_DEV_REGISTERED;
@@ -417,6 +420,14 @@ int ib_register_device(struct ib_device *device,
 	down_write(&lists_rwsem);
 	list_add_tail(&device->core_list, &device_list);
 	up_write(&lists_rwsem);
+	mutex_unlock(&device_mutex);
+	return 0;
+
+cache_cleanup:
+	ib_cache_cleanup_one(device);
+	ib_cache_release_one(device);
+port_cleanup:
+	kfree(device->port_immutable);
 out:
 	mutex_unlock(&device_mutex);
 	return ret;
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index daadf3130c9f..48bb75503255 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1301,7 +1301,7 @@ int ib_device_register_sysfs(struct ib_device *device,
 	free_port_list_attributes(device);
 
 err_unregister:
-	device_unregister(class_dev);
+	device_del(class_dev);
 
 err:
 	return ret;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 85ed5051fdfd..207e5c2457cc 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1519,7 +1519,9 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 
 	if (!qp->device->attach_mcast)
 		return -ENOSYS;
-	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
+	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
+	    lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
+	    lid == be16_to_cpu(IB_LID_PERMISSIVE))
 		return -EINVAL;
 
 	ret = qp->device->attach_mcast(qp, gid, lid);
@@ -1535,7 +1537,9 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 
 	if (!qp->device->detach_mcast)
 		return -ENOSYS;
-	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
+	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
+	    lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
+	    lid == be16_to_cpu(IB_LID_PERMISSIVE))
 		return -EINVAL;
 
 	ret = qp->device->detach_mcast(qp, gid, lid);
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index aa15bcbfb079..17d7578de6e5 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -784,23 +784,29 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
 /* when sending, force a reschedule every one of these periods */
 #define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */
 
+void hfi1_do_send_from_rvt(struct rvt_qp *qp)
+{
+	hfi1_do_send(qp, false);
+}
+
 void _hfi1_do_send(struct work_struct *work)
 {
 	struct iowait *wait = container_of(work, struct iowait, iowork);
 	struct rvt_qp *qp = iowait_to_qp(wait);
 
-	hfi1_do_send(qp);
+	hfi1_do_send(qp, true);
 }
 
 /**
  * hfi1_do_send - perform a send on a QP
  * @work: contains a pointer to the QP
+ * @in_thread: true if in a workqueue thread
  *
  * Process entries in the send work queue until credit or queue is
  * exhausted.  Only allow one CPU to send a packet per QP.
  * Otherwise, two threads could send packets out of order.
  */
-void hfi1_do_send(struct rvt_qp *qp)
+void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
 {
 	struct hfi1_pkt_state ps;
 	struct hfi1_qp_priv *priv = qp->priv;
@@ -868,8 +874,10 @@ void hfi1_do_send(struct rvt_qp *qp)
 			qp->s_hdrwords = 0;
 			/* allow other tasks to run */
 			if (unlikely(time_after(jiffies, timeout))) {
-				if (workqueue_congested(cpu,
-							ps.ppd->hfi1_wq)) {
+				if (!in_thread ||
+				    workqueue_congested(
+						cpu,
+						ps.ppd->hfi1_wq)) {
 					spin_lock_irqsave(
 						&qp->s_lock,
 						ps.flags);
@@ -882,11 +890,9 @@ void hfi1_do_send(struct rvt_qp *qp)
 						*ps.ppd->dd->send_schedule);
 					return;
 				}
-				if (!irqs_disabled()) {
-					cond_resched();
-					this_cpu_inc(
-					   *ps.ppd->dd->send_schedule);
-				}
+				cond_resched();
+				this_cpu_inc(
+					*ps.ppd->dd->send_schedule);
 				timeout = jiffies + (timeout_int) / 8;
 			}
 			spin_lock_irqsave(&qp->s_lock, ps.flags);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 222315fadab1..16ef7b12b0b8 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
@@ -1751,7 +1751,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
 	dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
 	dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
 	dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
-	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
+	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
 	dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
 	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
 	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 3a0b589e41c2..92e72ab2b610 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
@@ -350,7 +350,9 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
 
 void _hfi1_do_send(struct work_struct *work);
 
-void hfi1_do_send(struct rvt_qp *qp);
+void hfi1_do_send_from_rvt(struct rvt_qp *qp);
+
+void hfi1_do_send(struct rvt_qp *qp, bool in_thread);
 
 void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 			enum ib_wc_status status);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index fba94df28cf1..c7e6d137c162 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2941,6 +2941,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
 
 err_map:
+	mlx4_ib_free_eqs(dev, ibdev);
 	iounmap(ibdev->uar_map);
 
 err_uar:
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index e010fe459e67..8772d88d324d 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1102,7 +1102,8 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
 	while ((p = rb_first(&ctx->mcg_table)) != NULL) {
 		group = rb_entry(p, struct mcast_group, node);
 		if (atomic_read(&group->refcount))
-			mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);
+			mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n",
+					atomic_read(&group->refcount), group);
 
 		force_clean_group(group);
 	}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 6bd5740e2691..09396bd7b02d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -281,8 +281,11 @@ void ipoib_delete_debug_files(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+	WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
+	WARN_ONCE(!priv->path_dentry, "null path debug file\n");
 	debugfs_remove(priv->mcg_dentry);
 	debugfs_remove(priv->path_dentry);
+	priv->mcg_dentry = priv->path_dentry = NULL;
 }
 
 int ipoib_register_debugfs(void)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index d1d3fb7a6127..b319cc26c9a7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -108,6 +108,33 @@ static struct ib_client ipoib_client = {
 	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
 };
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static int ipoib_netdev_event(struct notifier_block *this,
+			      unsigned long event, void *ptr)
+{
+	struct netdev_notifier_info *ni = ptr;
+	struct net_device *dev = ni->dev;
+
+	if (dev->netdev_ops->ndo_open != ipoib_open)
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_REGISTER:
+		ipoib_create_debug_files(dev);
+		break;
+	case NETDEV_CHANGENAME:
+		ipoib_delete_debug_files(dev);
+		ipoib_create_debug_files(dev);
+		break;
+	case NETDEV_UNREGISTER:
+		ipoib_delete_debug_files(dev);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+#endif
+
 int ipoib_open(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -1674,8 +1701,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
 
 	ASSERT_RTNL();
 
-	ipoib_delete_debug_files(dev);
-
 	/* Delete any child interfaces first */
 	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
 		/* Stop GC on child */
@@ -2090,8 +2115,6 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto register_failed;
 	}
 
-	ipoib_create_debug_files(priv->dev);
-
 	if (ipoib_cm_add_mode_attr(priv->dev))
 		goto sysfs_failed;
 	if (ipoib_add_pkey_attr(priv->dev))
@@ -2106,7 +2129,6 @@ static struct net_device *ipoib_add_port(const char *format,
 	return priv->dev;
 
 sysfs_failed:
-	ipoib_delete_debug_files(priv->dev);
 	unregister_netdev(priv->dev);
 
 register_failed:
@@ -2191,6 +2213,12 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
 	kfree(dev_list);
 }
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static struct notifier_block ipoib_netdev_notifier = {
+	.notifier_call = ipoib_netdev_event,
+};
+#endif
+
 static int __init ipoib_init_module(void)
 {
 	int ret;
@@ -2243,6 +2271,9 @@ static int __init ipoib_init_module(void)
 	if (ret)
 		goto err_client;
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+	register_netdevice_notifier(&ipoib_netdev_notifier);
+#endif
 	return 0;
 
 err_client:
@@ -2260,6 +2291,9 @@ static int __init ipoib_init_module(void)
 
 static void __exit ipoib_cleanup_module(void)
 {
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+	unregister_netdevice_notifier(&ipoib_netdev_notifier);
+#endif
 	ipoib_netlink_fini();
 	ib_unregister_client(&ipoib_client);
 	ib_sa_unregister_client(&ipoib_sa_client);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 3e10e3dac2e7..e543bc745f34 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -86,8 +86,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
 		goto register_failed;
 	}
 
-	ipoib_create_debug_files(priv->dev);
-
 	/* RTNL childs don't need proprietary sysfs entries */
 	if (type == IPOIB_LEGACY_CHILD) {
 		if (ipoib_cm_add_mode_attr(priv->dev))
@@ -108,7 +106,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
 
 sysfs_failed:
 	result = -ENOMEM;
-	ipoib_delete_debug_files(priv->dev);
 	unregister_netdevice(priv->dev);
 
 register_failed:
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 389a3637ffcc..b8f3d77d3b5c 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1649,12 +1649,16 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
 
 static int crypt_wipe_key(struct crypt_config *cc)
 {
+	int r;
+
 	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
-	memset(&cc->key, 0, cc->key_size * sizeof(u8));
+	get_random_bytes(&cc->key, cc->key_size);
 
 	kzfree(cc->key_string);
 	cc->key_string = NULL;
+	r = crypt_setkey(cc);
+	memset(&cc->key, 0, cc->key_size * sizeof(u8));
 
-	return crypt_setkey(cc);
+	return r;
 }
 
 static void crypt_dtr(struct dm_target *ti)
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 9fab33b113c4..68d4084377ad 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -961,15 +961,15 @@ static int metadata_commit(struct era_metadata *md)
 		}
 	}
 
-	r = save_sm_root(md);
+	r = dm_tm_pre_commit(md->tm);
 	if (r) {
-		DMERR("%s: save_sm_root failed", __func__);
+		DMERR("%s: pre commit failed", __func__);
 		return r;
 	}
 
-	r = dm_tm_pre_commit(md->tm);
+	r = save_sm_root(md);
 	if (r) {
-		DMERR("%s: pre commit failed", __func__);
+		DMERR("%s: save_sm_root failed", __func__);
 		return r;
 	}
 
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 0b081d170087..505b9f6b4a47 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -810,10 +810,14 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	dm_init_md_queue(md);
 
 	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
-	blk_mq_register_dev(disk_to_dev(md->disk), q);
+	err = blk_mq_register_dev(disk_to_dev(md->disk), q);
+	if (err)
+		goto out_cleanup_queue;
 
 	return 0;
 
+out_cleanup_queue:
+	blk_cleanup_queue(q);
 out_tag_set:
 	blk_mq_free_tag_set(md->tag_set);
 out_kfree_tag_set:
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2b266a2b5035..5742e5eb0704 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1069,6 +1069,7 @@ static void passdown_endio(struct bio *bio)
 	 * to unmap (we ignore err).
 	 */
 	queue_passdown_pt2(bio->bi_private);
+	bio_put(bio);
 }
 
 static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a34f58772022..839ead062645 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2222,6 +2222,8 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 		     (i == r1_bio->read_disk ||
 		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
 			continue;
+		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
+			continue;
 
 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
 		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 97dd2925ed6e..4b76af2b8715 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -314,7 +314,7 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
 	if (rc < 0) {
 		struct nd_btt *nd_btt = to_nd_btt(btt_dev);
 
-		__nd_detach_ndns(btt_dev, &nd_btt->ndns);
+		nd_detach_ndns(btt_dev, &nd_btt->ndns);
 		put_device(btt_dev);
 	}
 
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index ca6d572c48fc..8513c8ac963b 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -21,8 +21,13 @@
 void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
 {
 	struct nd_namespace_common *ndns = *_ndns;
+	struct nvdimm_bus *nvdimm_bus;
 
-	lockdep_assert_held(&ndns->dev.mutex);
+	if (!ndns)
+		return;
+
+	nvdimm_bus = walk_to_nvdimm_bus(&ndns->dev);
+	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
 	dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
 	ndns->claim = NULL;
 	*_ndns = NULL;
@@ -37,18 +42,20 @@ void nd_detach_ndns(struct device *dev,
 	if (!ndns)
 		return;
 	get_device(&ndns->dev);
-	device_lock(&ndns->dev);
+	nvdimm_bus_lock(&ndns->dev);
 	__nd_detach_ndns(dev, _ndns);
-	device_unlock(&ndns->dev);
+	nvdimm_bus_unlock(&ndns->dev);
 	put_device(&ndns->dev);
 }
 
 bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
 		struct nd_namespace_common **_ndns)
 {
+	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev);
+
 	if (attach->claim)
 		return false;
-	lockdep_assert_held(&attach->dev.mutex);
+	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
 	dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
 	attach->claim = dev;
 	*_ndns = attach;
@@ -61,9 +68,9 @@ bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
 {
 	bool claimed;
 
-	device_lock(&attach->dev);
+	nvdimm_bus_lock(&attach->dev);
 	claimed = __nd_attach_ndns(dev, attach, _ndns);
-	device_unlock(&attach->dev);
+	nvdimm_bus_unlock(&attach->dev);
 
 	return claimed;
 }
@@ -114,7 +121,7 @@ static void nd_detach_and_reset(struct device *dev,
 		struct nd_namespace_common **_ndns)
 {
 	/* detach the namespace and destroy / reset the device */
-	nd_detach_ndns(dev, _ndns);
+	__nd_detach_ndns(dev, _ndns);
 	if (is_idle(dev, *_ndns)) {
 		nd_device_unregister(dev, ND_ASYNC);
 	} else if (is_nd_btt(dev)) {
@@ -184,7 +191,7 @@ ssize_t nd_namespace_store(struct device *dev,
 	}
 
 	WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
-	if (!nd_attach_ndns(dev, ndns, _ndns)) {
ndns, _ndns)) { dev_dbg(dev, "%s already claimed\n", dev_name(&ndns->dev)); len = -EBUSY; diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c index 45fa82cae87c..c1b6556aea6e 100644 --- a/drivers/nvdimm/dax_devs.c +++ b/drivers/nvdimm/dax_devs.c @@ -124,7 +124,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns) dev_dbg(dev, "%s: dax: %s\n", __func__, rc == 0 ? dev_name(dax_dev) : ""); if (rc < 0) { - __nd_detach_ndns(dax_dev, &nd_pfn->ndns); + nd_detach_ndns(dax_dev, &nd_pfn->ndns); put_device(dax_dev); } else __nd_device_register(dax_dev); diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 6c033c9a2f06..335c8175410b 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -484,7 +484,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns) dev_dbg(dev, "%s: pfn: %s\n", __func__, rc == 0 ? dev_name(pfn_dev) : ""); if (rc < 0) { - __nd_detach_ndns(pfn_dev, &nd_pfn->ndns); + nd_detach_ndns(pfn_dev, &nd_pfn->ndns); put_device(pfn_dev); } else __nd_device_register(pfn_dev); @@ -538,7 +538,8 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn, nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns); altmap = NULL; } else if (nd_pfn->mode == PFN_MODE_PMEM) { - nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE; + nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res) + - offset) / PAGE_SIZE); if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns) dev_info(&nd_pfn->dev, "number of pfns truncated from %lld to %ld\n", @@ -625,7 +626,8 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) */ start += start_pad; size = resource_size(&nsio->res); - npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K; + npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K) + / PAGE_SIZE); if (nd_pfn->mode == PFN_MODE_PMEM) { /* * vmemmap_populate_hugepages() allocates the memmap array in diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 5b536be5a12e..0fc18262a2bc 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -388,12 +388,12 @@ static void nd_pmem_shutdown(struct device *dev) static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) { - struct pmem_device *pmem = dev_get_drvdata(dev); - struct nd_region *nd_region = to_region(pmem); + struct nd_region *nd_region; resource_size_t offset = 0, end_trunc = 0; struct nd_namespace_common *ndns; struct nd_namespace_io *nsio; struct resource res; + struct badblocks *bb; if (event != NVDIMM_REVALIDATE_POISON) return; @@ -402,20 +402,33 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) struct nd_btt *nd_btt = to_nd_btt(dev); ndns = nd_btt->ndns; - } else if (is_nd_pfn(dev)) { - struct nd_pfn *nd_pfn = to_nd_pfn(dev); - struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; + nd_region = to_nd_region(ndns->dev.parent); + nsio = to_nd_namespace_io(&ndns->dev); + bb = &nsio->bb; + } else { + struct pmem_device *pmem = dev_get_drvdata(dev); - ndns = nd_pfn->ndns; - offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad); - end_trunc = __le32_to_cpu(pfn_sb->end_trunc); - } else - ndns = to_ndns(dev); + nd_region = to_region(pmem); + bb = &pmem->bb; + + if (is_nd_pfn(dev)) { + struct nd_pfn *nd_pfn = to_nd_pfn(dev); + struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; + + ndns = nd_pfn->ndns; + offset = pmem->data_offset + + __le32_to_cpu(pfn_sb->start_pad); + end_trunc = __le32_to_cpu(pfn_sb->end_trunc); + } else { + ndns = to_ndns(dev); + } + + nsio = to_nd_namespace_io(&ndns->dev); + } - nsio = 
to_nd_namespace_io(&ndns->dev); res.start = nsio->res.start + offset; res.end = nsio->res.end - end_trunc; - nvdimm_badblocks_populate(nd_region, &pmem->bb, &res); + nvdimm_badblocks_populate(nd_region, bb, &res); } MODULE_ALIAS("pmem"); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index b7cb5066d961..378885f4050b 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -968,17 +968,20 @@ EXPORT_SYMBOL_GPL(nvdimm_flush); */ int nvdimm_has_flush(struct nd_region *nd_region) { - struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev); int i; /* no nvdimm == flushing capability unknown */ if (nd_region->ndr_mappings == 0) return -ENXIO; - for (i = 0; i < nd_region->ndr_mappings; i++) - /* flush hints present, flushing required */ - if (ndrd_get_flush_wpq(ndrd, i, 0)) + for (i = 0; i < nd_region->ndr_mappings; i++) { + struct nd_mapping *nd_mapping = &nd_region->mapping[i]; + struct nvdimm *nvdimm = nd_mapping->nvdimm; + + /* flush hints present / available */ + if (nvdimm->num_flush) return 1; + } /* * The platform defines dimm devices without hints, assume diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c index 70390de66e0e..eb0a095efe9c 100644 --- a/drivers/staging/comedi/drivers/jr3_pci.c +++ b/drivers/staging/comedi/drivers/jr3_pci.c @@ -611,7 +611,7 @@ static void jr3_pci_poll_dev(unsigned long data) s = &dev->subdevices[i]; spriv = s->private; - if (now > spriv->next_time_min) { + if (time_after_eq(now, spriv->next_time_min)) { struct jr3_pci_poll_delay sub_delay; sub_delay = jr3_pci_poll_subdevice(s); @@ -727,11 +727,12 @@ static int jr3_pci_auto_attach(struct comedi_device *dev, s->insn_read = jr3_pci_ai_insn_read; spriv = jr3_pci_alloc_spriv(dev, s); - if (spriv) { - /* Channel specific range and maxdata */ - s->range_table_list = spriv->range_table_list; - s->maxdata_list = spriv->maxdata_list; - } + if (!spriv) + return -ENOMEM; + + /* Channel specific range and maxdata */ + s->range_table_list = spriv->range_table_list; + s->maxdata_list = spriv->maxdata_list; } /* Reset DSP card */ diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c index 400969170d1c..f03e43b1b5f6 100644 --- a/drivers/staging/gdm724x/gdm_mux.c +++ b/drivers/staging/gdm724x/gdm_mux.c @@ -664,9 +664,8 @@ static int __init gdm_usb_mux_init(void) static void __exit gdm_usb_mux_exit(void) { - unregister_lte_tty_driver(); - usb_deregister(&gdm_mux_driver); + unregister_lte_tty_driver(); } module_init(gdm_usb_mux_init); diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c index c6c3de94adaa..edb607a45505 100644 --- a/drivers/staging/media/lirc/lirc_sir.c +++ b/drivers/staging/media/lirc/lirc_sir.c @@ -227,6 +227,7 @@ static int init_chrdev(void) if (!rcdev) return -ENOMEM; + rcdev->input_name = "SIR IrDA port"; rcdev->input_phys = KBUILD_MODNAME "/input0"; rcdev->input_id.bustype = BUS_HOST; rcdev->input_id.vendor = 0x0001; @@ -234,6 +235,7 @@ static int init_chrdev(void) rcdev->input_id.version = 0x0100; rcdev->tx_ir = sir_tx_ir; rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; + rcdev->driver_name = KBUILD_MODNAME; rcdev->map_name = RC_MAP_RC6_MCE; rcdev->timeout = IR_DEFAULT_TIMEOUT; rcdev->dev.parent = &sir_ir_dev->dev; @@ -740,7 +742,13 @@ static int init_sir_ir(void) static int sir_ir_probe(struct platform_device *dev) { - return 0; + int retval; + + retval = init_chrdev(); + if (retval < 0) + return retval; + + return init_sir_ir(); } 
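The lirc_sir change above moves initialization out of sir_ir_init() and into the platform probe() callback, so a failure in init_chrdev() or init_sir_ir() is unwound by the driver core rather than by the hand-rolled fail: label that the hunk below removes; the gdm_mux hunk above makes the same point for teardown order (usb_deregister() before unregister_lte_tty_driver()). A minimal sketch of this setup/unwind idiom, with hypothetical init_a()/init_b()/cleanup_a()/cleanup_b() helpers standing in for the real ones (not part of this patch):

#include <linux/init.h>
#include <linux/module.h>

/* Hypothetical setup/teardown pairs, stand-ins for e.g. init_chrdev()
 * and init_sir_ir() in the hunk above. */
static int init_a(void) { return 0; }
static void cleanup_a(void) { }
static int init_b(void) { return 0; }
static void cleanup_b(void) { }

static int __init example_init(void)
{
	int ret;

	ret = init_a();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = init_b();
	if (ret)
		goto err_cleanup_a;	/* undo only what already succeeded */

	return 0;

err_cleanup_a:
	cleanup_a();
	return ret;
}

static void __exit example_exit(void)
{
	/* Teardown mirrors setup in reverse order, which is exactly the
	 * ordering bug the gdm_mux exit path fixes above. */
	cleanup_b();
	cleanup_a();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");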
static int sir_ir_remove(struct platform_device *dev) @@ -780,18 +788,8 @@ static int __init sir_ir_init(void) goto pdev_add_fail; } - retval = init_chrdev(); - if (retval < 0) - goto fail; - - retval = init_sir_ir(); - if (retval) - goto fail; - return 0; -fail: - platform_device_del(sir_ir_dev); pdev_add_fail: platform_device_put(sir_ir_dev); pdev_alloc_fail: diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c index 1ae6a64c7fd4..dc11a05be8c4 100644 --- a/drivers/staging/vt6656/usbpipe.c +++ b/drivers/staging/vt6656/usbpipe.c @@ -47,15 +47,25 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value, u16 index, u16 length, u8 *buffer) { int status = 0; + u8 *usb_buffer; if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags)) return STATUS_FAILURE; mutex_lock(&priv->usb_lock); + usb_buffer = kmemdup(buffer, length, GFP_KERNEL); + if (!usb_buffer) { + mutex_unlock(&priv->usb_lock); + return -ENOMEM; + } + status = usb_control_msg(priv->usb, - usb_sndctrlpipe(priv->usb, 0), request, 0x40, value, - index, buffer, length, USB_CTL_WAIT); + usb_sndctrlpipe(priv->usb, 0), + request, 0x40, value, + index, usb_buffer, length, USB_CTL_WAIT); + + kfree(usb_buffer); mutex_unlock(&priv->usb_lock); @@ -75,15 +85,28 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, u16 index, u16 length, u8 *buffer) { int status; + u8 *usb_buffer; if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags)) return STATUS_FAILURE; mutex_lock(&priv->usb_lock); + usb_buffer = kmalloc(length, GFP_KERNEL); + if (!usb_buffer) { + mutex_unlock(&priv->usb_lock); + return -ENOMEM; + } + status = usb_control_msg(priv->usb, - usb_rcvctrlpipe(priv->usb, 0), request, 0xc0, value, - index, buffer, length, USB_CTL_WAIT); + usb_rcvctrlpipe(priv->usb, 0), + request, 0xc0, value, + index, usb_buffer, length, USB_CTL_WAIT); + + if (status == length) + memcpy(buffer, usb_buffer, length); + + kfree(usb_buffer); mutex_unlock(&priv->usb_lock); diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c index 2eebc6215cac..bdf11c9f44a8 100644 --- a/drivers/staging/wilc1000/linux_wlan.c +++ b/drivers/staging/wilc1000/linux_wlan.c @@ -1251,11 +1251,12 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type, else strcpy(ndev->name, "p2p%d"); - vif->idx = wl->vif_num; vif->wilc = *wilc; vif->ndev = ndev; wl->vif[i] = vif; wl->vif_num = i; + vif->idx = wl->vif_num; + ndev->netdev_ops = &wilc_netdev_ops; { diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index e3f9ed3690b7..8beed3451346 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4683,6 +4683,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) continue; } atomic_set(&sess->session_reinstatement, 1); + atomic_set(&sess->session_fall_back_to_erl0, 1); spin_unlock(&sess->conn_lock); list_move_tail(&se_sess->sess_list, &free_list); diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 344e8448869c..96d9c73af1ae 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -1528,6 +1528,7 @@ static void lio_tpg_close_session(struct se_session *se_sess) return; } atomic_set(&sess->session_reinstatement, 1); + atomic_set(&sess->session_fall_back_to_erl0, 1); spin_unlock(&sess->conn_lock); iscsit_stop_time2retain_timer(sess); diff --git 
a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index ad8f3011bdc2..66238477137b 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -208,6 +208,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) initiatorname_param->value) && (sess_p->sess_ops->SessionType == sessiontype))) { atomic_set(&sess_p->session_reinstatement, 1); + atomic_set(&sess_p->session_fall_back_to_erl0, 1); spin_unlock(&sess_p->conn_lock); iscsit_inc_session_usage_count(sess_p); iscsit_stop_time2retain_timer(sess_p); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 87aa376a1a1a..e00050ccb61d 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -595,8 +595,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, if (ret < 0) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - if (ret) - target_complete_cmd(cmd, SAM_STAT_GOOD); + target_complete_cmd(cmd, SAM_STAT_GOOD); return 0; } diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index c194063f169b..6ec390bb178e 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -507,8 +507,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes * been failed with a non-zero SCSI status. */ if (cmd->scsi_status) { - pr_err("compare_and_write_callback: non zero scsi_status:" + pr_debug("compare_and_write_callback: non zero scsi_status:" " 0x%02x\n", cmd->scsi_status); + *post_ret = 1; + if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION) + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out; } diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 66b59a15780d..65799575c666 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -216,16 +216,11 @@ static int pty_signal(struct tty_struct *tty, int sig) static void pty_flush_buffer(struct tty_struct *tty) { struct tty_struct *to = tty->link; - struct tty_ldisc *ld; if (!to) return; - ld = tty_ldisc_ref(to); - tty_buffer_flush(to, ld); - if (ld) - tty_ldisc_deref(ld); - + tty_buffer_flush(to, NULL); if (to->packet) { spin_lock_irq(&tty->ctrl_lock); tty->ctrl_status |= TIOCPKT_FLUSHWRITE; diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index b0a377725d63..f2503d862f3a 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2470,19 +2470,34 @@ static int __init pl011_early_console_setup(struct earlycon_device *device, if (!device->port.membase) return -ENODEV; - /* On QDF2400 SOCs affected by Erratum 44, the "qdf2400_e44" must - * also be specified, e.g. "earlycon=pl011,
<address>
,qdf2400_e44". - */ - if (!strcmp(device->options, "qdf2400_e44")) - device->con->write = qdf2400_e44_early_write; - else - device->con->write = pl011_early_write; + device->con->write = pl011_early_write; return 0; } OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup); OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup); -EARLYCON_DECLARE(qdf2400_e44, pl011_early_console_setup); + +/* + * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by + * Erratum 44, traditional earlycon can be enabled by specifying + * "earlycon=qdf2400_e44,
". Any options are ignored. + * + * Alternatively, you can just specify "earlycon", and the early console + * will be enabled with the information from the SPCR table. In this + * case, the SPCR code will detect the need for the E44 work-around, + * and set the console name to "qdf2400_e44". + */ +static int __init +qdf2400_e44_early_console_setup(struct earlycon_device *device, + const char *opt) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = qdf2400_e44_early_write; + return 0; +} +EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup); #else #define AMBA_CONSOLE NULL diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 6c6f82ad8d5c..e4210b9ad0d3 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -1767,7 +1767,8 @@ static int serial_omap_probe(struct platform_device *pdev) return 0; err_add_port: - pm_runtime_put(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); pm_qos_remove_request(&up->pm_qos_request); device_init_wakeup(up->dev, false); @@ -1780,9 +1781,13 @@ static int serial_omap_remove(struct platform_device *dev) { struct uart_omap_port *up = platform_get_drvdata(dev); + pm_runtime_get_sync(up->dev); + + uart_remove_one_port(&serial_omap_reg, &up->port); + + pm_runtime_dont_use_autosuspend(up->dev); pm_runtime_put_sync(up->dev); pm_runtime_disable(up->dev); - uart_remove_one_port(&serial_omap_reg, &up->port); pm_qos_remove_request(&up->pm_qos_request); device_init_wakeup(&dev->dev, false); diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 7a17aedbf902..43c84c9bb904 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -860,6 +860,7 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p) { struct s3c24xx_uart_dma *dma = p->dma; unsigned long flags; + int ret; /* Default slave configuration parameters */ dma->rx_conf.direction = DMA_DEV_TO_MEM; @@ -884,8 +885,8 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p) dma->tx_chan = dma_request_chan(p->port.dev, "tx"); if (IS_ERR(dma->tx_chan)) { - dma_release_channel(dma->rx_chan); - return PTR_ERR(dma->tx_chan); + ret = PTR_ERR(dma->tx_chan); + goto err_release_rx; } dmaengine_slave_config(dma->tx_chan, &dma->tx_conf); @@ -894,26 +895,42 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p) dma->rx_size = PAGE_SIZE; dma->rx_buf = kmalloc(dma->rx_size, GFP_KERNEL); - if (!dma->rx_buf) { - dma_release_channel(dma->rx_chan); - dma_release_channel(dma->tx_chan); - return -ENOMEM; + ret = -ENOMEM; + goto err_release_tx; } - dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf, + dma->rx_addr = dma_map_single(p->port.dev, dma->rx_buf, dma->rx_size, DMA_FROM_DEVICE); + if (dma_mapping_error(p->port.dev, dma->rx_addr)) { + ret = -EIO; + goto err_free_rx; + } spin_lock_irqsave(&p->port.lock, flags); /* TX buffer */ - dma->tx_addr = dma_map_single(dma->tx_chan->device->dev, - p->port.state->xmit.buf, + dma->tx_addr = dma_map_single(p->port.dev, p->port.state->xmit.buf, UART_XMIT_SIZE, DMA_TO_DEVICE); spin_unlock_irqrestore(&p->port.lock, flags); + if (dma_mapping_error(p->port.dev, dma->tx_addr)) { + ret = -EIO; + goto err_unmap_rx; + } return 0; + +err_unmap_rx: + dma_unmap_single(p->port.dev, dma->rx_addr, dma->rx_size, + DMA_FROM_DEVICE); +err_free_rx: + kfree(dma->rx_buf); +err_release_tx: + dma_release_channel(dma->tx_chan); +err_release_rx: + 
dma_release_channel(dma->rx_chan); + return ret; } static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p) @@ -922,7 +939,7 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p) if (dma->rx_chan) { dmaengine_terminate_all(dma->rx_chan); - dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr, + dma_unmap_single(p->port.dev, dma->rx_addr, dma->rx_size, DMA_FROM_DEVICE); kfree(dma->rx_buf); dma_release_channel(dma->rx_chan); @@ -931,7 +948,7 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p) if (dma->tx_chan) { dmaengine_terminate_all(dma->tx_chan); - dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr, + dma_unmap_single(p->port.dev, dma->tx_addr, UART_XMIT_SIZE, DMA_TO_DEVICE); dma_release_channel(dma->tx_chan); dma->tx_chan = NULL; diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 8fda45a45bd3..08669fee6d7f 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -58,7 +58,6 @@ MODULE_DEVICE_TABLE (usb, wdm_ids); #define WDM_SUSPENDING 8 #define WDM_RESETTING 9 #define WDM_OVERFLOW 10 -#define WDM_DRAIN_ON_OPEN 11 #define WDM_MAX 16 @@ -182,7 +181,7 @@ static void wdm_in_callback(struct urb *urb) "nonzero urb status received: -ESHUTDOWN\n"); goto skip_error; case -EPIPE: - dev_dbg(&desc->intf->dev, + dev_err(&desc->intf->dev, "nonzero urb status received: -EPIPE\n"); break; default: @@ -210,25 +209,6 @@ static void wdm_in_callback(struct urb *urb) desc->reslength = length; } } - - /* - * Handling devices with the WDM_DRAIN_ON_OPEN flag set: - * If desc->resp_count is unset, then the urb was submitted - * without a prior notification. If the device returned any - * data, then this implies that it had messages queued without - * notifying us. Continue reading until that queue is flushed. - */ - if (!desc->resp_count) { - if (!length) { - /* do not propagate the expected -EPIPE */ - desc->rerr = 0; - goto unlock; - } - dev_dbg(&desc->intf->dev, "got %d bytes without notification\n", length); - set_bit(WDM_RESPONDING, &desc->flags); - usb_submit_urb(desc->response, GFP_ATOMIC); - } - skip_error: set_bit(WDM_READ, &desc->flags); wake_up(&desc->wait); @@ -243,7 +223,6 @@ static void wdm_in_callback(struct urb *urb) service_outstanding_interrupt(desc); } -unlock: spin_unlock(&desc->iuspin); } @@ -686,17 +665,6 @@ static int wdm_open(struct inode *inode, struct file *file) dev_err(&desc->intf->dev, "Error submitting int urb - %d\n", rv); rv = usb_translate_errors(rv); - } else if (test_bit(WDM_DRAIN_ON_OPEN, &desc->flags)) { - /* - * Some devices keep pending messages queued - * without resending notifications. 
We must - * flush the message queue before we can - * assume a one-to-one relationship between - * notifications and messages in the queue - */ - dev_dbg(&desc->intf->dev, "draining queued data\n"); - set_bit(WDM_RESPONDING, &desc->flags); - rv = usb_submit_urb(desc->response, GFP_KERNEL); } } else { rv = 0; @@ -803,8 +771,7 @@ static void wdm_rxwork(struct work_struct *work) /* --- hotplug --- */ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor *ep, - u16 bufsize, int (*manage_power)(struct usb_interface *, int), - bool drain_on_open) + u16 bufsize, int (*manage_power)(struct usb_interface *, int)) { int rv = -ENOMEM; struct wdm_device *desc; @@ -891,68 +858,6 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor desc->manage_power = manage_power; - /* - * "drain_on_open" enables a hack to work around a firmware - * issue observed on network functions, in particular MBIM - * functions. - * - * Quoting section 7 of the CDC-WMC r1.1 specification: - * - * "The firmware shall interpret GetEncapsulatedResponse as a - * request to read response bytes. The firmware shall send - * the next wLength bytes from the response. The firmware - * shall allow the host to retrieve data using any number of - * GetEncapsulatedResponse requests. The firmware shall - * return a zero- length reply if there are no data bytes - * available. - * - * The firmware shall send ResponseAvailable notifications - * periodically, using any appropriate algorithm, to inform - * the host that there is data available in the reply - * buffer. The firmware is allowed to send ResponseAvailable - * notifications even if there is no data available, but - * this will obviously reduce overall performance." - * - * These requirements, although they make equally sense, are - * often not implemented by network functions. Some firmwares - * will queue data indefinitely, without ever resending a - * notification. The result is that the driver and firmware - * loses "syncronization" if the driver ever fails to respond - * to a single notification, something which easily can happen - * on release(). When this happens, the driver will appear to - * never receive notifications for the most current data. Each - * notification will only cause a single read, which returns - * the oldest data in the firmware's queue. - * - * The "drain_on_open" hack resolves the situation by draining - * data from the firmware until none is returned, without a - * prior notification. - * - * This will inevitably race with the firmware, risking that - * we read data from the device before handling the associated - * notification. To make things worse, some of the devices - * needing the hack do not implement the "return zero if no - * data is available" requirement either. Instead they return - * an error on the subsequent read in this case. This means - * that "winning" the race can cause an unexpected EIO to - * userspace. - * - * "winning" the race is more likely on resume() than on - * open(), and the unexpected error is more harmful in the - * middle of an open session. The hack is therefore only - * applied on open(), and not on resume() where it logically - * would be equally necessary. So we define open() as the only - * driver <-> device "syncronization point". Should we happen - * to lose a notification after open(), then syncronization - * will be lost until release() - * - * The hack should not be enabled for CDC WDM devices - * conforming to the CDC-WMC r1.1 specification. 
This is - * ensured by setting drain_on_open to false in wdm_probe(). - */ - if (drain_on_open) - set_bit(WDM_DRAIN_ON_OPEN, &desc->flags); - spin_lock(&wdm_device_list_lock); list_add(&desc->device_list, &wdm_device_list); spin_unlock(&wdm_device_list_lock); @@ -1006,7 +911,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) goto err; ep = &iface->endpoint[0].desc; - rv = wdm_create(intf, ep, maxcom, &wdm_manage_power, false); + rv = wdm_create(intf, ep, maxcom, &wdm_manage_power); err: return rv; @@ -1038,7 +943,7 @@ struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf, { int rv = -EINVAL; - rv = wdm_create(intf, ep, bufsize, manage_power, true); + rv = wdm_create(intf, ep, bufsize, manage_power); if (rv < 0) goto err; diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index cdee5130638b..eb87a259d55c 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -1331,6 +1331,24 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) */ if (udev->parent && !PMSG_IS_AUTO(msg)) status = 0; + + /* + * If the device is inaccessible, don't try to resume + * suspended interfaces and just return the error. + */ + if (status && status != -EBUSY) { + int err; + u16 devstat; + + err = usb_get_status(udev, USB_RECIP_DEVICE, 0, + &devstat); + if (err) { + dev_err(&udev->dev, + "Failed to suspend device, error %d\n", + status); + goto done; + } + } } /* If the suspend failed, resume interfaces that did get suspended */ @@ -1763,6 +1781,9 @@ static int autosuspend_check(struct usb_device *udev) int w, i; struct usb_interface *intf; + if (udev->state == USB_STATE_NOTATTACHED) + return -ENODEV; + /* Fail if autosuspend is disabled, or any interfaces are in use, or * any interface drivers require remote wakeup but it isn't available. 
*/ diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index e26bd5e773ad..87ad6b6bfee8 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -29,6 +29,7 @@ #define MAX_USB_MINORS 256 static const struct file_operations *usb_minors[MAX_USB_MINORS]; static DECLARE_RWSEM(minor_rwsem); +static DEFINE_MUTEX(init_usb_class_mutex); static int usb_open(struct inode *inode, struct file *file) { @@ -111,8 +112,9 @@ static void release_usb_class(struct kref *kref) static void destroy_usb_class(void) { - if (usb_class) - kref_put(&usb_class->kref, release_usb_class); + mutex_lock(&init_usb_class_mutex); + kref_put(&usb_class->kref, release_usb_class); + mutex_unlock(&init_usb_class_mutex); } int usb_major_init(void) @@ -173,7 +175,10 @@ int usb_register_dev(struct usb_interface *intf, if (intf->minor >= 0) return -EADDRINUSE; + mutex_lock(&init_usb_class_mutex); retval = init_usb_class(); + mutex_unlock(&init_usb_class_mutex); + if (retval) return retval; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 5286bf67869a..9dca59ef18b3 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1066,6 +1066,9 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) portstatus = portchange = 0; status = hub_port_status(hub, port1, &portstatus, &portchange); + if (status) + goto abort; + if (udev || (portstatus & USB_PORT_STAT_CONNECTION)) dev_dbg(&port_dev->dev, "status %04x change %04x\n", portstatus, portchange); @@ -1198,7 +1201,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) /* Scan all ports that need attention */ kick_hub_wq(hub); - + abort: if (type == HUB_INIT2 || type == HUB_INIT3) { /* Allow autosuspend if it was suppressed */ disconnected: @@ -2084,6 +2087,12 @@ void usb_disconnect(struct usb_device **pdev) dev_info(&udev->dev, "USB disconnect, device number %d\n", udev->devnum); + /* + * Ensure that the pm runtime code knows that the USB device + * is in the process of being disconnected. + */ + pm_runtime_barrier(&udev->dev); + usb_lock_device(udev); hub_disconnect_children(udev); diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 8ad203296079..f3ee80ece682 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -460,6 +460,7 @@ config USB_CONFIGFS_F_TCM choice tristate "USB Gadget Drivers" default USB_ETH + optional help A Linux "Gadget Driver" talks to the USB Peripheral Controller driver through the abstract "gadget" API. 
Some other operating diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index ba1853f4e407..3f8f28f6fa94 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1502,6 +1502,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, */ max_esit_payload = xhci_get_max_esit_payload(udev, ep); interval = xhci_get_endpoint_interval(udev, ep); + + /* Periodic endpoint bInterval limit quirk */ + if (usb_endpoint_xfer_int(&ep->desc) || + usb_endpoint_xfer_isoc(&ep->desc)) { + if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) && + udev->speed >= USB_SPEED_HIGH && + interval >= 7) { + interval = 6; + } + } + mult = xhci_get_endpoint_mult(udev, ep); max_packet = usb_endpoint_maxp(&ep->desc); max_burst = xhci_get_endpoint_max_burst(udev, ep); @@ -2480,7 +2491,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | xhci->cmd_ring->cycle_state; xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Setting command ring address to 0x%x", val); + "// Setting command ring address to 0x%016llx", val_64); xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); xhci_dbg_cmd_ptrs(xhci); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index fc99f51d12e1..7b86508ac8cf 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -199,6 +199,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == 0x1042) xhci->quirks |= XHCI_BROKEN_STREAMS; + if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) + xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; + if (xhci->quirks & XHCI_RESET_ON_RESUME) xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "QUIRK: Resetting on resume"); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index da3eb695fe54..2496bd6304ca 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1818,6 +1818,7 @@ struct xhci_hcd { #define XHCI_MISSING_CAS (1 << 24) /* For controller with a broken Port Disable implementation */ #define XHCI_BROKEN_PORT_PED (1 << 25) +#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) unsigned int num_active_eps; unsigned int limit_active_eps; diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 17c081068257..26ae5d1a2a4e 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -159,6 +159,7 @@ get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf) case USB_ENDPOINT_XFER_INT: if (dev->info->intr) goto try_intr; + continue; case USB_ENDPOINT_XFER_ISOC: if (dev->info->iso) goto try_iso; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index c540de15aad2..03e6319b6d1c 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -873,6 +873,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID, USB_CLASS_VENDOR_SPEC, USB_SUBCLASS_VENDOR_SPEC, 0x00) }, + { USB_DEVICE_INTERFACE_NUMBER(ACTEL_VID, MICROSEMI_ARROW_SF2PLUS_BOARD_PID, 2) }, { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) }, { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 48ee04c94a75..71fb9e59db71 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -873,6 +873,12 @@ #define FIC_VID 0x1457 #define FIC_NEO1973_DEBUG_PID 0x5118 +/* + * Actel / Microsemi + */ +#define ACTEL_VID 
0x1514 +#define MICROSEMI_ARROW_SF2PLUS_BOARD_PID 0x2008 + /* Olimex */ #define OLIMEX_VID 0x15BA #define OLIMEX_ARM_USB_OCD_PID 0x0003 diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 32d2633092a3..a8a079ba9477 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -246,69 +246,46 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn) return ret; } -struct vwork { - struct mm_struct *mm; - long npage; - struct work_struct work; -}; - -/* delayed decrement/increment for locked_vm */ -static void vfio_lock_acct_bg(struct work_struct *work) -{ - struct vwork *vwork = container_of(work, struct vwork, work); - struct mm_struct *mm; - - mm = vwork->mm; - down_write(&mm->mmap_sem); - mm->locked_vm += vwork->npage; - up_write(&mm->mmap_sem); - mmput(mm); - kfree(vwork); -} - -static void vfio_lock_acct(struct task_struct *task, long npage) +static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap) { - struct vwork *vwork; struct mm_struct *mm; bool is_current; + int ret; if (!npage) - return; + return 0; is_current = (task->mm == current->mm); mm = is_current ? task->mm : get_task_mm(task); if (!mm) - return; /* process exited */ + return -ESRCH; /* process exited */ - if (down_write_trylock(&mm->mmap_sem)) { - mm->locked_vm += npage; - up_write(&mm->mmap_sem); - if (!is_current) - mmput(mm); - return; - } + ret = down_write_killable(&mm->mmap_sem); + if (!ret) { + if (npage > 0) { + if (lock_cap ? !*lock_cap : + !has_capability(task, CAP_IPC_LOCK)) { + unsigned long limit; + + limit = task_rlimit(task, + RLIMIT_MEMLOCK) >> PAGE_SHIFT; + + if (mm->locked_vm + npage > limit) + ret = -ENOMEM; + } + } + + if (!ret) + mm->locked_vm += npage; - if (is_current) { - mm = get_task_mm(task); - if (!mm) - return; + up_write(&mm->mmap_sem); } - /* - * Couldn't get mmap_sem lock, so must setup to update - * mm->locked_vm later. 
If locked_vm were atomic, we - * wouldn't need this silliness - */ - vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL); - if (WARN_ON(!vwork)) { + if (!is_current) mmput(mm); - return; - } - INIT_WORK(&vwork->work, vfio_lock_acct_bg); - vwork->mm = mm; - vwork->npage = npage; - schedule_work(&vwork->work); + + return ret; } /* @@ -405,7 +382,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, long npage, unsigned long *pfn_base) { - unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; bool lock_cap = capable(CAP_IPC_LOCK); long ret, pinned = 0, lock_acct = 0; bool rsvd; @@ -442,8 +419,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, /* Lock all the consecutive pages from pfn_base */ for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage; pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) { - unsigned long pfn = 0; - ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn); if (ret) break; @@ -460,14 +435,25 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, put_pfn(pfn, dma->prot); pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, limit << PAGE_SHIFT); - break; + ret = -ENOMEM; + goto unpin_out; } lock_acct++; } } out: - vfio_lock_acct(current, lock_acct); + ret = vfio_lock_acct(current, lock_acct, &lock_cap); + +unpin_out: + if (ret) { + if (!rsvd) { + for (pfn = *pfn_base ; pinned ; pfn++, pinned--) + put_pfn(pfn, dma->prot); + } + + return ret; + } return pinned; } @@ -488,7 +474,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, } if (do_accounting) - vfio_lock_acct(dma->task, locked - unlocked); + vfio_lock_acct(dma->task, locked - unlocked, NULL); return unlocked; } @@ -522,8 +508,14 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, goto pin_page_exit; } - if (!rsvd && do_accounting) - vfio_lock_acct(dma->task, 1); + if (!rsvd && do_accounting) { + ret = vfio_lock_acct(dma->task, 1, &lock_cap); + if (ret) { + put_pfn(*pfn_base, dma->prot); + goto pin_page_exit; + } + } + ret = 1; pin_page_exit: @@ -543,7 +535,7 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova, unlocked = vfio_iova_put_vfio_pfn(dma, vpfn); if (do_accounting) - vfio_lock_acct(dma->task, -unlocked); + vfio_lock_acct(dma->task, -unlocked, NULL); return unlocked; } @@ -740,7 +732,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, dma->iommu_mapped = false; if (do_accounting) { - vfio_lock_acct(dma->task, -unlocked); + vfio_lock_acct(dma->task, -unlocked, NULL); return 0; } return unlocked; @@ -1382,7 +1374,7 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu) if (!is_invalid_reserved_pfn(vpfn->pfn)) locked++; } - vfio_lock_acct(dma->task, locked - unlocked); + vfio_lock_acct(dma->task, locked - unlocked, NULL); } } diff --git a/fs/block_dev.c b/fs/block_dev.c index 56039dfbc674..c2a7ec8e9c03 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -103,12 +103,11 @@ void invalidate_bdev(struct block_device *bdev) { struct address_space *mapping = bdev->bd_inode->i_mapping; - if (mapping->nrpages == 0) - return; - - invalidate_bh_lrus(); - lru_add_drain_all(); /* make sure all lru add caches are flushed */ - invalidate_mapping_pages(mapping, 0, -1); + if (mapping->nrpages) { + invalidate_bh_lrus(); + lru_add_drain_all(); /* make sure all lru add 
caches are flushed */ + invalidate_mapping_pages(mapping, 0, -1); + } /* 99% of the time, we don't need to flush the cleancache on the bdev. * But, for the strange corners, lets be cautious */ diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index febc28f9e2c2..75267cdd5dfd 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -392,6 +392,7 @@ static int __set_xattr(struct ceph_inode_info *ci, if (update_xattr) { int err = 0; + if (xattr && (flags & XATTR_CREATE)) err = -EEXIST; else if (!xattr && (flags & XATTR_REPLACE)) @@ -399,12 +400,14 @@ static int __set_xattr(struct ceph_inode_info *ci, if (err) { kfree(name); kfree(val); + kfree(*newxattr); return err; } if (update_xattr < 0) { if (xattr) __remove_xattr(ci, xattr); kfree(name); + kfree(*newxattr); return 0; } } diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 02b071bf3732..a0b3e7d1be48 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -83,6 +83,9 @@ convert_sfm_char(const __u16 src_char, char *target) case SFM_COLON: *target = ':'; break; + case SFM_DOUBLEQUOTE: + *target = '"'; + break; case SFM_ASTERISK: *target = '*'; break; @@ -418,6 +421,9 @@ static __le16 convert_to_sfm_char(char src_char, bool end_of_string) case ':': dest_char = cpu_to_le16(SFM_COLON); break; + case '"': + dest_char = cpu_to_le16(SFM_DOUBLEQUOTE); + break; case '*': dest_char = cpu_to_le16(SFM_ASTERISK); break; diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h index 3d7298cc0aeb..8a79a34e66b8 100644 --- a/fs/cifs/cifs_unicode.h +++ b/fs/cifs/cifs_unicode.h @@ -57,6 +57,7 @@ * not conflict (although almost does) with the mapping above. */ +#define SFM_DOUBLEQUOTE ((__u16) 0xF020) #define SFM_ASTERISK ((__u16) 0xF021) #define SFM_QUESTION ((__u16) 0xF025) #define SFM_COLON ((__u16) 0xF022) @@ -64,8 +65,8 @@ #define SFM_LESSTHAN ((__u16) 0xF023) #define SFM_PIPE ((__u16) 0xF027) #define SFM_SLASH ((__u16) 0xF026) -#define SFM_PERIOD ((__u16) 0xF028) -#define SFM_SPACE ((__u16) 0xF029) +#define SFM_SPACE ((__u16) 0xF028) +#define SFM_PERIOD ((__u16) 0xF029) /* * Mapping mechanism to use when one of the seven reserved characters is diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index dd3f5fabfdf6..aa553d0b58db 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -87,6 +87,7 @@ extern mempool_t *cifs_req_poolp; extern mempool_t *cifs_mid_poolp; struct workqueue_struct *cifsiod_wq; +struct workqueue_struct *cifsoplockd_wq; __u32 cifs_lock_secret; /* @@ -1369,9 +1370,16 @@ init_cifs(void) goto out_clean_proc; } + cifsoplockd_wq = alloc_workqueue("cifsoplockd", + WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); + if (!cifsoplockd_wq) { + rc = -ENOMEM; + goto out_destroy_cifsiod_wq; + } + rc = cifs_fscache_register(); if (rc) - goto out_destroy_wq; + goto out_destroy_cifsoplockd_wq; rc = cifs_init_inodecache(); if (rc) @@ -1419,7 +1427,9 @@ init_cifs(void) cifs_destroy_inodecache(); out_unreg_fscache: cifs_fscache_unregister(); -out_destroy_wq: +out_destroy_cifsoplockd_wq: + destroy_workqueue(cifsoplockd_wq); +out_destroy_cifsiod_wq: destroy_workqueue(cifsiod_wq); out_clean_proc: cifs_proc_clean(); @@ -1442,6 +1452,7 @@ exit_cifs(void) cifs_destroy_mids(); cifs_destroy_inodecache(); cifs_fscache_unregister(); + destroy_workqueue(cifsoplockd_wq); destroy_workqueue(cifsiod_wq); cifs_proc_clean(); } diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 37f5a41cc50c..17f0e732eedc 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -1683,6 +1683,7 @@ void cifs_oplock_break(struct work_struct *work); extern const struct 
slow_work_ops cifs_oplock_break_ops; extern struct workqueue_struct *cifsiod_wq; +extern struct workqueue_struct *cifsoplockd_wq; extern __u32 cifs_lock_secret; extern mempool_t *cifs_mid_poolp; diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 5d21f00ae341..205fd94f52fd 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -718,6 +718,9 @@ CIFSSMBEcho(struct TCP_Server_Info *server) if (rc) return rc; + if (server->capabilities & CAP_UNICODE) + smb->hdr.Flags2 |= SMBFLG2_UNICODE; + /* set up echo request */ smb->hdr.Tid = 0xffff; smb->hdr.WordCount = 1; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index d82467cfb0e2..d95744d8b8ab 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2912,16 +2912,14 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data) { struct cifs_sb_info *old = CIFS_SB(sb); struct cifs_sb_info *new = mnt_data->cifs_sb; + bool old_set = old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH; + bool new_set = new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH; - if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) { - if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)) - return 0; - /* The prepath should be null terminated strings */ - if (strcmp(new->prepath, old->prepath)) - return 0; - + if (old_set && new_set && !strcmp(new->prepath, old->prepath)) return 1; - } + else if (!old_set && !new_set) + return 1; + return 0; } diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index 265c45fe4ea5..76fb0917dc8c 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c @@ -74,7 +74,8 @@ static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file, rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0, src_inode->i_size, 0); - + if (rc > 0) + rc = 0; out_fput: fdput(src_file); out_drop_write: @@ -208,10 +209,14 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) rc = -EOPNOTSUPP; break; case CIFS_IOC_GET_MNT_INFO: + if (pSMBFile == NULL) + break; tcon = tlink_tcon(pSMBFile->tlink); rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg); break; case CIFS_ENUMERATE_SNAPSHOTS: + if (pSMBFile == NULL) + break; if (arg == 0) { rc = -EINVAL; goto cifs_ioc_exit; diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index d3fb11529ed9..b578c6d09597 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -492,7 +492,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &pCifsInode->flags); - queue_work(cifsiod_wq, + queue_work(cifsoplockd_wq, &netfile->oplock_break); netfile->oplock_break_cancelled = false; diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 1a04b3a5beb1..7b08a1446a7f 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -499,7 +499,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, else cfile->oplock_break_cancelled = true; - queue_work(cifsiod_wq, &cfile->oplock_break); + queue_work(cifsoplockd_wq, &cfile->oplock_break); kfree(lw); return true; } @@ -643,7 +643,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags); spin_unlock(&cfile->file_info_lock); - queue_work(cifsiod_wq, &cfile->oplock_break); + queue_work(cifsoplockd_wq, + &cfile->oplock_break); spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 152e37f2ad92..c58691834eb2 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -942,6 +942,7 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon 
*tcon, } if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) { rc = -ERANGE; + kfree(retbuf); return rc; } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 02da648041fc..0fd63f0bc440 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -632,8 +632,12 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) } if (rsplen != sizeof(struct validate_negotiate_info_rsp)) { - cifs_dbg(VFS, "invalid size of protocol negotiate response\n"); - return -EIO; + cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n", + rsplen); + + /* relax check since Mac returns max bufsize allowed on ioctl */ + if (rsplen > CIFSMaxBufSize) + return -EIO; } /* check validate negotiate info response matches what we got earlier */ @@ -1853,8 +1857,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, * than one credit. Windows typically sets this smaller, but for some * ioctls it may be useful to allow server to send more. No point * limiting what the server can send as long as fits in one credit + * Unfortunately - we can not handle more than CIFS_MAX_MSG_SIZE + * (by default, note that it can be overridden to make max larger) + * in responses (except for read responses which can be bigger. + * We may want to bump this limit up */ - req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */ + req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize); if (is_fsctl) req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 37b49894c762..15bf9c31a34d 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -300,7 +300,7 @@ int fscrypt_fname_disk_to_usr(struct inode *inode, } else { memset(buf, 0, 8); } - memcpy(buf + 8, iname->name + iname->len - 16, 16); + memcpy(buf + 8, iname->name + ((iname->len - 17) & ~15), 16); oname->name[0] = '_'; oname->len = 1 + digest_encode(buf, 24, oname->name + 1); return 0; diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 4908906d54d5..fc3660d82a7f 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -143,27 +143,61 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) } EXPORT_SYMBOL(fscrypt_ioctl_get_policy); +/** + * fscrypt_has_permitted_context() - is a file's encryption policy permitted + * within its directory? + * + * @parent: inode for parent directory + * @child: inode for file being looked up, opened, or linked into @parent + * + * Filesystems must call this before permitting access to an inode in a + * situation where the parent directory is encrypted (either before allowing + * ->lookup() to succeed, or for a regular file before allowing it to be opened) + * and before any operation that involves linking an inode into an encrypted + * directory, including link, rename, and cross rename. It enforces the + * constraint that within a given encrypted directory tree, all files use the + * same encryption policy. The pre-access check is needed to detect potentially + * malicious offline violations of this constraint, while the link and rename + * checks are needed to prevent online violations of this constraint. + * + * Return: 1 if permitted, 0 if forbidden. If forbidden, the caller must fail + * the filesystem operation with EPERM. 
+ */ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) { - struct fscrypt_info *parent_ci, *child_ci; + const struct fscrypt_operations *cops = parent->i_sb->s_cop; + const struct fscrypt_info *parent_ci, *child_ci; + struct fscrypt_context parent_ctx, child_ctx; int res; - if ((parent == NULL) || (child == NULL)) { - printk(KERN_ERR "parent %p child %p\n", parent, child); - BUG_ON(1); - } - /* No restrictions on file types which are never encrypted */ if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) && !S_ISLNK(child->i_mode)) return 1; - /* no restrictions if the parent directory is not encrypted */ - if (!parent->i_sb->s_cop->is_encrypted(parent)) + /* No restrictions if the parent directory is unencrypted */ + if (!cops->is_encrypted(parent)) return 1; - /* if the child directory is not encrypted, this is always a problem */ - if (!parent->i_sb->s_cop->is_encrypted(child)) + + /* Encrypted directories must not contain unencrypted files */ + if (!cops->is_encrypted(child)) return 0; + + /* + * Both parent and child are encrypted, so verify they use the same + * encryption policy. Compare the fscrypt_info structs if the keys are + * available, otherwise retrieve and compare the fscrypt_contexts. + * + * Note that the fscrypt_context retrieval will be required frequently + * when accessing an encrypted directory tree without the key. + * Performance-wise this is not a big deal because we already don't + * really optimize for file access without the key (to the extent that + * such access is even possible), given that any attempted access + * already causes a fscrypt_context retrieval and keyring search. + * + * In any case, if an unexpected error occurs, fall back to "forbidden". + */ + res = fscrypt_get_encryption_info(parent); if (res) return 0; @@ -172,17 +206,32 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) return 0; parent_ci = parent->i_crypt_info; child_ci = child->i_crypt_info; - if (!parent_ci && !child_ci) - return 1; - if (!parent_ci || !child_ci) + + if (parent_ci && child_ci) { + return memcmp(parent_ci->ci_master_key, child_ci->ci_master_key, + FS_KEY_DESCRIPTOR_SIZE) == 0 && + (parent_ci->ci_data_mode == child_ci->ci_data_mode) && + (parent_ci->ci_filename_mode == + child_ci->ci_filename_mode) && + (parent_ci->ci_flags == child_ci->ci_flags); + } + + res = cops->get_context(parent, &parent_ctx, sizeof(parent_ctx)); + if (res != sizeof(parent_ctx)) + return 0; + + res = cops->get_context(child, &child_ctx, sizeof(child_ctx)); + if (res != sizeof(child_ctx)) return 0; - return (memcmp(parent_ci->ci_master_key, - child_ci->ci_master_key, - FS_KEY_DESCRIPTOR_SIZE) == 0 && - (parent_ci->ci_data_mode == child_ci->ci_data_mode) && - (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) && - (parent_ci->ci_flags == child_ci->ci_flags)); + return memcmp(parent_ctx.master_key_descriptor, + child_ctx.master_key_descriptor, + FS_KEY_DESCRIPTOR_SIZE) == 0 && + (parent_ctx.contents_encryption_mode == + child_ctx.contents_encryption_mode) && + (parent_ctx.filenames_encryption_mode == + child_ctx.filenames_encryption_mode) && + (parent_ctx.flags == child_ctx.flags); } EXPORT_SYMBOL(fscrypt_has_permitted_context); diff --git a/fs/dax.c b/fs/dax.c index 85abd741253d..b87f3ab742ba 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -507,35 +507,6 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) } /* - * Invalidate exceptional DAX entry if easily possible. 
This handles DAX - * entries for invalidate_inode_pages() so we evict the entry only if we can - * do so without blocking. - */ -int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index) -{ - int ret = 0; - void *entry, **slot; - struct radix_tree_root *page_tree = &mapping->page_tree; - - spin_lock_irq(&mapping->tree_lock); - entry = __radix_tree_lookup(page_tree, index, NULL, &slot); - if (!entry || !radix_tree_exceptional_entry(entry) || - slot_locked(mapping, slot)) - goto out; - if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) || - radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)) - goto out; - radix_tree_delete(page_tree, index); - mapping->nrexceptional--; - ret = 1; -out: - spin_unlock_irq(&mapping->tree_lock); - if (ret) - dax_wake_mapping_entry_waiter(mapping, index, entry, true); - return ret; -} - -/* * Invalidate exceptional DAX entry if it is clean. */ int dax_invalidate_mapping_entry_sync(struct address_space *mapping, @@ -1032,7 +1003,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, * into page tables. We have to tear down these mappings so that data * written by write(2) is visible in mmap. */ - if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) { + if (iomap->flags & IOMAP_F_NEW) { invalidate_inode_pages2_range(inode->i_mapping, pos >> PAGE_SHIFT, (end - 1) >> PAGE_SHIFT); @@ -1382,6 +1353,16 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, goto fallback; /* + * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX + * PMD or a HZP entry. If it can't (because a 4k page is already in + * the tree, for instance), it will return -EEXIST and we just fall + * back to 4k entries. + */ + entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD); + if (IS_ERR(entry)) + goto fallback; + + /* * Note that we don't use iomap_apply here. We aren't doing I/O, only * setting up a mapping, so really we're using iomap_begin() as a way * to look up our filesystem block. @@ -1389,21 +1370,11 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pos = (loff_t)pgoff << PAGE_SHIFT; error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap); if (error) - goto fallback; + goto unlock_entry; if (iomap.offset + iomap.length < pos + PMD_SIZE) goto finish_iomap; - /* - * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX - * PMD or a HZP entry. If it can't (because a 4k page is already in - * the tree, for instance), it will return -EEXIST and we just fall - * back to 4k entries. 
- */ - entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD); - if (IS_ERR(entry)) - goto finish_iomap; - switch (iomap.type) { case IOMAP_MAPPED: result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry); @@ -1411,7 +1382,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, case IOMAP_UNWRITTEN: case IOMAP_HOLE: if (WARN_ON_ONCE(write)) - goto unlock_entry; + break; result = dax_pmd_load_hole(vmf, &iomap, &entry); break; default: @@ -1419,8 +1390,6 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, break; } - unlock_entry: - put_locked_mapping_entry(mapping, pgoff, entry); finish_iomap: if (ops->iomap_end) { int copied = PMD_SIZE; @@ -1436,6 +1405,8 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags, &iomap); } + unlock_entry: + put_locked_mapping_entry(mapping, pgoff, entry); fallback: if (result == VM_FAULT_FALLBACK) { split_huge_pmd(vma, vmf->pmd, vmf->address); diff --git a/fs/ext4/file.c b/fs/ext4/file.c index cefa9835f275..831fd6beebf0 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -257,6 +257,7 @@ static int ext4_dax_huge_fault(struct vm_fault *vmf, enum page_entry_size pe_size) { int result; + handle_t *handle = NULL; struct inode *inode = file_inode(vmf->vma->vm_file); struct super_block *sb = inode->i_sb; bool write = vmf->flags & FAULT_FLAG_WRITE; @@ -264,12 +265,24 @@ static int ext4_dax_huge_fault(struct vm_fault *vmf, if (write) { sb_start_pagefault(sb); file_update_time(vmf->vma->vm_file); + down_read(&EXT4_I(inode)->i_mmap_sem); + handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE, + EXT4_DATA_TRANS_BLOCKS(sb)); + } else { + down_read(&EXT4_I(inode)->i_mmap_sem); } - down_read(&EXT4_I(inode)->i_mmap_sem); - result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops); - up_read(&EXT4_I(inode)->i_mmap_sem); - if (write) + if (!IS_ERR(handle)) + result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops); + else + result = VM_FAULT_SIGBUS; + if (write) { + if (!IS_ERR(handle)) + ext4_journal_stop(handle); + up_read(&EXT4_I(inode)->i_mmap_sem); sb_end_pagefault(sb); + } else { + up_read(&EXT4_I(inode)->i_mmap_sem); + } return result; } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index b9ffa9f4191f..88203ae5b154 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5874,6 +5874,11 @@ int ext4_page_mkwrite(struct vm_fault *vmf) file_update_time(vma->vm_file); down_read(&EXT4_I(inode)->i_mmap_sem); + + ret = ext4_convert_inline_data(inode); + if (ret) + goto out_ret; + /* Delalloc case is easy... */ if (test_opt(inode->i_sb, DELALLOC) && !ext4_should_journal_data(inode) && diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 07e5e1405771..eb2dea8287c1 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1255,9 +1255,9 @@ static inline int ext4_match(struct ext4_filename *fname, if (unlikely(!name)) { if (fname->usr_fname->name[0] == '_') { int ret; - if (de->name_len < 16) + if (de->name_len <= 32) return 0; - ret = memcmp(de->name + de->name_len - 16, + ret = memcmp(de->name + ((de->name_len - 17) & ~15), fname->crypto_buf.name + 8, 16); return (ret == 0) ? 
1 : 0; } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 1602b4bccae6..dd24476e8d2c 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -309,7 +309,7 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, if (type >= META_FLUSH) { io->fio.type = META_FLUSH; io->fio.op = REQ_OP_WRITE; - io->fio.op_flags = REQ_META | REQ_PRIO; + io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC; if (!test_opt(sbi, NOBARRIER)) io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA; } diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 8d5c62b07b28..96e9e7fa64dd 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -130,19 +130,29 @@ struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname, continue; } - /* encrypted case */ + if (de->hash_code != namehash) + goto not_match; + de_name.name = d->filename[bit_pos]; de_name.len = le16_to_cpu(de->name_len); - /* show encrypted name */ - if (fname->hash) { - if (de->hash_code == cpu_to_le32(fname->hash)) - goto found; - } else if (de_name.len == name->len && - de->hash_code == namehash && - !memcmp(de_name.name, name->name, name->len)) +#ifdef CONFIG_F2FS_FS_ENCRYPTION + if (unlikely(!name->name)) { + if (fname->usr_fname->name[0] == '_') { + if (de_name.len > 32 && + !memcmp(de_name.name + ((de_name.len - 17) & ~15), + fname->crypto_buf.name + 8, 16)) + goto found; + goto not_match; + } + name->name = fname->crypto_buf.name; + name->len = fname->crypto_buf.len; + } +#endif + if (de_name.len == name->len && + !memcmp(de_name.name, name->name, name->len)) goto found; - +not_match: if (max_slots && max_len > *max_slots) *max_slots = max_len; max_len = 0; @@ -170,12 +180,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, struct f2fs_dir_entry *de = NULL; bool room = false; int max_slots; - f2fs_hash_t namehash; - - if(fname->hash) - namehash = cpu_to_le32(fname->hash); - else - namehash = f2fs_dentry_hash(&name); + f2fs_hash_t namehash = f2fs_dentry_hash(&name, fname); nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level); nblock = bucket_blocks(level); @@ -207,13 +212,9 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, f2fs_put_page(dentry_page, 0); } - /* This is to increase the speed of f2fs_create */ - if (!de && room) { - F2FS_I(dir)->task = current; - if (F2FS_I(dir)->chash != namehash) { - F2FS_I(dir)->chash = namehash; - F2FS_I(dir)->clevel = level; - } + if (!de && room && F2FS_I(dir)->chash != namehash) { + F2FS_I(dir)->chash = namehash; + F2FS_I(dir)->clevel = level; } return de; @@ -254,6 +255,9 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, break; } out: + /* This is to increase the speed of f2fs_create */ + if (!de) + F2FS_I(dir)->task = current; return de; } @@ -542,7 +546,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, level = 0; slots = GET_DENTRY_SLOTS(new_name->len); - dentry_hash = f2fs_dentry_hash(new_name); + dentry_hash = f2fs_dentry_hash(new_name, NULL); current_depth = F2FS_I(dir)->i_current_depth; if (F2FS_I(dir)->chash == dentry_hash) { diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 0a6e115562f6..05d7e2cefc56 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -2133,7 +2133,8 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi); /* * hash.c */ -f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info); +f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info, + struct fscrypt_name *fname); /* * node.c diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 418fd9881646..b5a62d4a3a69 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -182,7 +182,7 @@ static 
unsigned int get_max_cost(struct f2fs_sb_info *sbi, if (p->alloc_mode == SSR) return sbi->blocks_per_seg; if (p->gc_mode == GC_GREEDY) - return sbi->blocks_per_seg * p->ofs_unit; + return 2 * sbi->blocks_per_seg * p->ofs_unit; else if (p->gc_mode == GC_CB) return UINT_MAX; else /* No other gc_mode */ diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c index 71b7206c431e..eb2e031ea887 100644 --- a/fs/f2fs/hash.c +++ b/fs/f2fs/hash.c @@ -70,7 +70,8 @@ static void str2hashbuf(const unsigned char *msg, size_t len, *buf++ = pad; } -f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info) +f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info, + struct fscrypt_name *fname) { __u32 hash; f2fs_hash_t f2fs_hash; @@ -79,6 +80,10 @@ f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info) const unsigned char *name = name_info->name; size_t len = name_info->len; + /* encrypted bigname case */ + if (fname && !fname->disk_name.name) + return cpu_to_le32(fname->hash); + if (is_dot_dotdot(name_info)) return 0; diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index e32a9e527968..fa729ff6b2f9 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -296,7 +296,7 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir, return NULL; } - namehash = f2fs_dentry_hash(&name); + namehash = f2fs_dentry_hash(&name, fname); inline_dentry = inline_data_addr(ipage); @@ -533,7 +533,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, f2fs_wait_on_page_writeback(ipage, NODE, true); - name_hash = f2fs_dentry_hash(new_name); + name_hash = f2fs_dentry_hash(new_name, NULL); make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2); f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos); diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 24bb8213d974..a204f22eba5b 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -316,7 +316,6 @@ int update_inode_page(struct inode *inode) } else if (err != -ENOENT) { f2fs_stop_checkpoint(sbi, false); } - f2fs_inode_synced(inode); return 0; } ret = update_inode(inode, node_page); @@ -448,6 +447,7 @@ void handle_failed_inode(struct inode *inode) * in a panic when flushing dirty inodes in gdirty_list. */ update_inode_page(inode); + f2fs_inode_synced(inode); /* don't make bad inode, since it becomes a regular file. 
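The f2fs_dentry_hash() change threaded through dir.c, hash.c, and inline.c above boils down to one early return. A reduced sketch of the dispatch (tea_hash_name() is a hypothetical stand-in for the existing TEA transform in hash.c; everything else follows the hunks): lookups pass the fscrypt_name so an encrypted bigname, whose plaintext is unavailable without the key, reuses the hash carried in the no-key name, while insert paths pass NULL and hash normally.

f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
			     struct fscrypt_name *fname)
{
	/* encrypted bigname: no plaintext to hash, reuse the stored hash */
	if (fname && !fname->disk_name.name)
		return cpu_to_le32(fname->hash);

	return tea_hash_name(name_info);	/* unchanged normal path */
}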
*/ unlock_new_inode(inode); diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 98f00a3a7f50..a30f323b7b2b 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -148,8 +148,6 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, inode->i_mapping->a_ops = &f2fs_dblock_aops; ino = inode->i_ino; - f2fs_balance_fs(sbi, true); - f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) @@ -163,6 +161,8 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); + + f2fs_balance_fs(sbi, true); return 0; out: handle_failed_inode(inode); @@ -423,8 +423,6 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, inode_nohighmem(inode); inode->i_mapping->a_ops = &f2fs_dblock_aops; - f2fs_balance_fs(sbi, true); - f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) @@ -487,6 +485,8 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, } kfree(sd); + + f2fs_balance_fs(sbi, true); return err; out: handle_failed_inode(inode); @@ -508,8 +508,6 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) inode->i_mapping->a_ops = &f2fs_dblock_aops; mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO); - f2fs_balance_fs(sbi, true); - set_inode_flag(inode, FI_INC_LINK); f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); @@ -524,6 +522,8 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); + + f2fs_balance_fs(sbi, true); return 0; out_fail: @@ -554,8 +554,6 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry, init_special_inode(inode, inode->i_mode, rdev); inode->i_op = &f2fs_special_inode_operations; - f2fs_balance_fs(sbi, true); - f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) @@ -569,6 +567,8 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry, if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); + + f2fs_balance_fs(sbi, true); return 0; out: handle_failed_inode(inode); @@ -595,8 +595,6 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry, inode->i_mapping->a_ops = &f2fs_dblock_aops; } - f2fs_balance_fs(sbi, true); - f2fs_lock_op(sbi); err = acquire_orphan_inode(sbi); if (err) @@ -622,6 +620,8 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry, /* link_count was changed by d_tmpfile as well. */ f2fs_unlock_op(sbi); unlock_new_inode(inode); + + f2fs_balance_fs(sbi, true); return 0; release_out: diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 29ef7088c558..56670c5058b7 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -416,7 +416,7 @@ static int __submit_flush_wait(struct block_device *bdev) struct bio *bio = f2fs_bio_alloc(0); int ret; - bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; + bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; bio->bi_bdev = bdev; ret = submit_bio_wait(bio); bio_put(bio); @@ -1788,15 +1788,14 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, stat_inc_block_count(sbi, curseg); + if (!__has_curseg_space(sbi, type)) + sit_i->s_ops->allocate_segment(sbi, type, false); /* - * SIT information should be updated before segment allocation, - * since SSR needs latest valid block information. + * SIT information should be updated after segment allocation, + * since we need to keep dirty segments precisely under SSR. 
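Every namei.c hunk above makes the same move: f2fs_balance_fs(sbi, true), which may kick off foreground garbage collection, migrates from before f2fs_lock_op() to after the operation has fully committed. A reduced sketch of the resulting create path, with error handling simplified (names are from the diff):

static int f2fs_create_order_sketch(struct f2fs_sb_info *sbi,
				    struct inode *dir, struct dentry *dentry,
				    struct inode *inode)
{
	int err;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	f2fs_unlock_op(sbi);
	if (err)
		return err;

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);

	/* only now, with the entry fully linked, consider running GC */
	f2fs_balance_fs(sbi, true);
	return 0;
}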
*/ refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr); - if (!__has_curseg_space(sbi, type)) - sit_i->s_ops->allocate_segment(sbi, type, false); - mutex_unlock(&sit_i->sentry_lock); if (page && IS_NODESEG(type)) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 858aef564a58..5ca78308d5ec 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1307,7 +1307,7 @@ static int __f2fs_commit_super(struct buffer_head *bh, unlock_buffer(bh); /* it's rare case, we can do fua all the time */ - return __sync_dirty_buffer(bh, REQ_PREFLUSH | REQ_FUA); + return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA); } static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi, diff --git a/fs/iomap.c b/fs/iomap.c index 141c3cd55a8b..1c25ae30500e 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -887,16 +887,14 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, flags |= IOMAP_WRITE; } - if (mapping->nrpages) { - ret = filemap_write_and_wait_range(mapping, start, end); - if (ret) - goto out_free_dio; + ret = filemap_write_and_wait_range(mapping, start, end); + if (ret) + goto out_free_dio; - ret = invalidate_inode_pages2_range(mapping, - start >> PAGE_SHIFT, end >> PAGE_SHIFT); - WARN_ON_ONCE(ret); - ret = 0; - } + ret = invalidate_inode_pages2_range(mapping, + start >> PAGE_SHIFT, end >> PAGE_SHIFT); + WARN_ON_ONCE(ret); + ret = 0; inode_dio_begin(inode); @@ -951,7 +949,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, * one is a pretty crazy thing to do, so we don't support it 100%. If * this invalidation fails, tough, the write still worked... */ - if (iov_iter_rw(iter) == WRITE && mapping->nrpages) { + if (iov_iter_rw(iter) == WRITE) { int err = invalidate_inode_pages2_range(mapping, start >> PAGE_SHIFT, end >> PAGE_SHIFT); WARN_ON_ONCE(err); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 5adc2fb62b0f..e768126f6a72 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1348,7 +1348,7 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags) jbd2_superblock_csum_set(journal, sb); get_bh(bh); bh->b_end_io = end_buffer_write_sync; - ret = submit_bh(REQ_OP_WRITE, write_flags, bh); + ret = submit_bh(REQ_OP_WRITE, write_flags | REQ_SYNC, bh); wait_on_buffer(bh); if (buffer_write_io_error(bh)) { clear_buffer_write_io_error(bh); diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index a304bf34b212..5ebe19353da6 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -218,8 +218,7 @@ int orangefs_setattr(struct dentry *dentry, struct iattr *iattr) if (ret) goto out; - if ((iattr->ia_valid & ATTR_SIZE) && - iattr->ia_size != i_size_read(inode)) { + if (iattr->ia_valid & ATTR_SIZE) { ret = orangefs_setattr_size(inode, iattr); if (ret) goto out; diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c index a290ff6ec756..7c315938e9c2 100644 --- a/fs/orangefs/namei.c +++ b/fs/orangefs/namei.c @@ -193,8 +193,6 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry, goto out; } - ORANGEFS_I(inode)->getattr_time = jiffies - 1; - gossip_debug(GOSSIP_NAME_DEBUG, "%s:%s:%d " "Found good inode [%lu] with count [%d]\n", diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c index 74a81b1daaac..237c9c04dc3b 100644 --- a/fs/orangefs/xattr.c +++ b/fs/orangefs/xattr.c @@ -76,11 +76,8 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *name, if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; - if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) { - gossip_err("Invalid key length (%d)\n", - (int)strlen(name)); + if 
(strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN) return -EINVAL; - } fsuid = from_kuid(&init_user_ns, current_fsuid()); fsgid = from_kgid(&init_user_ns, current_fsgid()); @@ -172,6 +169,9 @@ static int orangefs_inode_removexattr(struct inode *inode, const char *name, struct orangefs_kernel_op_s *new_op = NULL; int ret = -ENOMEM; + if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN) + return -EINVAL; + down_write(&orangefs_inode->xattr_sem); new_op = op_alloc(ORANGEFS_VFS_OP_REMOVEXATTR); if (!new_op) @@ -231,23 +231,13 @@ int orangefs_inode_setxattr(struct inode *inode, const char *name, "%s: name %s, buffer_size %zd\n", __func__, name, size); - if (size >= ORANGEFS_MAX_XATTR_VALUELEN || - flags < 0) { - gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n", - (int)size, - flags); + if (size > ORANGEFS_MAX_XATTR_VALUELEN) + return -EINVAL; + if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN) return -EINVAL; - } internal_flag = convert_to_internal_xattr_flags(flags); - if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) { - gossip_err - ("orangefs_inode_setxattr: bogus key size (%d)\n", - (int)(strlen(name))); - return -EINVAL; - } - /* This is equivalent to a removexattr */ if (size == 0 && value == NULL) { gossip_debug(GOSSIP_XATTR_DEBUG, @@ -358,7 +348,7 @@ ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size) returned_count = new_op->downcall.resp.listxattr.returned_count; if (returned_count < 0 || - returned_count >= ORANGEFS_MAX_XATTR_LISTLEN) { + returned_count > ORANGEFS_MAX_XATTR_LISTLEN) { gossip_err("%s: impossible value for returned_count:%d:\n", __func__, returned_count); diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 6515796460df..bfabc65fdc74 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -210,7 +210,7 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode, if (err) goto out_dput; - if (ovl_type_merge(dentry->d_parent)) { + if (ovl_type_merge(dentry->d_parent) && d_is_dir(newdentry)) { /* Setting opaque here is just an optimization, allow to fail */ ovl_set_opaque(dentry, newdentry); } diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index efab7b64925b..b81ce8ddf14a 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -709,6 +709,7 @@ int pstore_register(struct pstore_info *psi) if (psi->flags & PSTORE_FLAGS_PMSG) pstore_register_pmsg(); + /* Start watching for new records, if desired. */ if (pstore_update_ms >= 0) { pstore_timer.expires = jiffies + msecs_to_jiffies(pstore_update_ms); @@ -731,6 +732,11 @@ EXPORT_SYMBOL_GPL(pstore_register); void pstore_unregister(struct pstore_info *psi) { + /* Stop timer and make sure all work has finished. */ + pstore_update_ms = -1; + del_timer_sync(&pstore_timer); + flush_work(&pstore_work); + if (psi->flags & PSTORE_FLAGS_PMSG) pstore_unregister_pmsg(); if (psi->flags & PSTORE_FLAGS_FTRACE) @@ -830,7 +836,9 @@ static void pstore_timefunc(unsigned long dummy) schedule_work(&pstore_work); } - mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms)); + if (pstore_update_ms >= 0) + mod_timer(&pstore_timer, + jiffies + msecs_to_jiffies(pstore_update_ms)); } module_param(backend, charp, 0444); diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index bc927e30bdcc..e11672aa4575 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c @@ -532,7 +532,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, } /* Initialize general buffer state. 
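The pstore_unregister() hunk above is a textbook teardown of a self-rearming timer, and the matching guard added to pstore_timefunc() is what makes it race-free. The ordering, taken directly from the hunks:

static void pstore_teardown_sketch(void)
{
	pstore_update_ms = -1;		/* timefunc now refuses to re-arm */
	del_timer_sync(&pstore_timer);	/* wait out any in-flight timefunc */
	flush_work(&pstore_work);	/* drain work the timer had queued */
}

Without the pstore_update_ms check in pstore_timefunc(), a callback running concurrently with del_timer_sync() could re-arm the timer, letting the work outlive the backend it references.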
*/ - prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock); + raw_spin_lock_init(&prz->buffer_lock); prz->flags = flags; ret = persistent_ram_buffer_map(start, size, prz, memtype); diff --git a/fs/xattr.c b/fs/xattr.c index 7e3317cf4045..94f49a082dd2 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -530,7 +530,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value, size = XATTR_SIZE_MAX; kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); if (!kvalue) { - kvalue = vmalloc(size); + kvalue = vzalloc(size); if (!kvalue) return -ENOMEM; } diff --git a/include/linux/dax.h b/include/linux/dax.h index d8a3dc042e1c..f8e1833f81f6 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -41,7 +41,6 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, const struct iomap_ops *ops); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); -int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry_sync(struct address_space *mapping, pgoff_t index); void dax_wake_mapping_entry_waiter(struct address_space *mapping, diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d0250744507a..333af5a41c78 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -641,18 +641,18 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); -int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc, - void *data, unsigned long len); +int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, unsigned long len); int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len); int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); -int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc, - void *data, unsigned long len); -int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc, - void *data, int offset, unsigned long len); -int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc, - gpa_t gpa, unsigned long len); +int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, unsigned long len); +int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, int offset, unsigned long len); +int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + gpa_t gpa, unsigned long len); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index bb7250c45cb8..e650f6f7d0bf 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -56,6 +56,9 @@ enum mem_cgroup_stat_index { MEMCG_SLAB_RECLAIMABLE, MEMCG_SLAB_UNRECLAIMABLE, MEMCG_SOCK, + MEMCG_WORKINGSET_REFAULT, + MEMCG_WORKINGSET_ACTIVATE, + MEMCG_WORKINGSET_NODERECLAIM, MEMCG_NR_STAT, }; @@ -494,6 +497,40 @@ extern int do_swap_account; void lock_page_memcg(struct page *page); void unlock_page_memcg(struct page *page); +static inline unsigned long mem_cgroup_read_stat(struct 
mem_cgroup *memcg, + enum mem_cgroup_stat_index idx) +{ + long val = 0; + int cpu; + + for_each_possible_cpu(cpu) + val += per_cpu(memcg->stat->count[idx], cpu); + + if (val < 0) + val = 0; + + return val; +} + +static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg, + enum mem_cgroup_stat_index idx, int val) +{ + if (!mem_cgroup_disabled()) + this_cpu_add(memcg->stat->count[idx], val); +} + +static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg, + enum mem_cgroup_stat_index idx) +{ + mem_cgroup_update_stat(memcg, idx, 1); +} + +static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg, + enum mem_cgroup_stat_index idx) +{ + mem_cgroup_update_stat(memcg, idx, -1); +} + /** * mem_cgroup_update_page_stat - update page state statistics * @page: the page @@ -508,14 +545,14 @@ void unlock_page_memcg(struct page *page); * if (TestClearPageState(page)) * mem_cgroup_update_page_stat(page, state, -1); * unlock_page(page) or unlock_page_memcg(page) + * + * Kernel pages are an exception to this, since they'll never move. */ static inline void mem_cgroup_update_page_stat(struct page *page, enum mem_cgroup_stat_index idx, int val) { - VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page))); - if (page->mem_cgroup) - this_cpu_add(page->mem_cgroup->stat->count[idx], val); + mem_cgroup_update_stat(page->mem_cgroup, idx, val); } static inline void mem_cgroup_inc_page_stat(struct page *page, @@ -740,6 +777,27 @@ static inline bool mem_cgroup_oom_synchronize(bool wait) return false; } +static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg, + enum mem_cgroup_stat_index idx) +{ + return 0; +} + +static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg, + enum mem_cgroup_stat_index idx, int val) +{ +} + +static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg, + enum mem_cgroup_stat_index idx) +{ +} + +static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg, + enum mem_cgroup_stat_index idx) +{ +} + static inline void mem_cgroup_update_page_stat(struct page *page, enum mem_cgroup_stat_index idx, int nr) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 8e02b3750fe0..d45172b559d8 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -226,6 +226,8 @@ struct lruvec { struct zone_reclaim_stat reclaim_stat; /* Evictions & activations on the inactive file list */ atomic_long_t inactive_age; + /* Refaults at the time of last reclaim cycle */ + unsigned long refaults; #ifdef CONFIG_MEMCG struct pglist_data *pgdat; #endif diff --git a/init/initramfs.c b/init/initramfs.c index 981f286c1d16..8daf7ac6c7e2 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -608,9 +608,11 @@ static void __init clean_rootfs(void) static int __init populate_rootfs(void) { + /* Load the built in initramfs */ char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size); if (err) panic("%s", err); /* Failed to decompress INTERNAL initramfs */ + /* If available load the bootloader supplied initrd */ if (initrd_start) { #ifdef CONFIG_BLK_DEV_RAM int fd; @@ -640,6 +642,7 @@ static int __init populate_rootfs(void) free_initrd(); } done: + /* empty statement */; #else printk(KERN_INFO "Unpacking initramfs...\n"); err = unpack_to_rootfs((char *)initrd_start, @@ -648,13 +651,14 @@ static int __init populate_rootfs(void) printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); free_initrd(); #endif - flush_delayed_fput(); - /* - * Try loading default modules from initramfs. This gives - * us a chance to load before device_initcalls. 
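The mem_cgroup_read_stat() helper moved into the header above sums per-CPU deltas that can individually be negative, so it accumulates in a signed long and clamps. A userspace model of just that logic (the array stands in for the per-CPU counters):

#include <stdio.h>

int main(void)
{
	long percpu[4] = { 7, -3, 2, -8 };	/* racy per-CPU deltas */
	long val = 0;
	int cpu;

	for (cpu = 0; cpu < 4; cpu++)
		val += percpu[cpu];

	if (val < 0)		/* never expose a transient negative sum */
		val = 0;

	printf("stat = %ld\n", val);	/* prints 0, not -2 */
	return 0;
}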
- */ - load_default_modules(); } + flush_delayed_fput(); + /* + * Try loading default modules from initramfs. This gives + * us a chance to load before device_initcalls. + */ + load_default_modules(); + return 0; } rootfs_initcall(populate_rootfs); diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 687f5e0194ef..b507f1889a72 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -438,6 +438,11 @@ struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp, static void cgroup_get(struct cgroup *cgrp) { + css_get(&cgrp->self); +} + +static void cgroup_get_live(struct cgroup *cgrp) +{ WARN_ON_ONCE(cgroup_is_dead(cgrp)); css_get(&cgrp->self); } @@ -932,7 +937,7 @@ static void link_css_set(struct list_head *tmp_links, struct css_set *cset, list_add_tail(&link->cgrp_link, &cset->cgrp_links); if (cgroup_parent(cgrp)) - cgroup_get(cgrp); + cgroup_get_live(cgrp); } /** @@ -1802,7 +1807,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, return ERR_PTR(-EINVAL); } cgrp_dfl_visible = true; - cgroup_get(&cgrp_dfl_root.cgrp); + cgroup_get_live(&cgrp_dfl_root.cgrp); dentry = cgroup_do_mount(&cgroup2_fs_type, flags, &cgrp_dfl_root, CGROUP2_SUPER_MAGIC, ns); @@ -2576,7 +2581,7 @@ void cgroup_lock_and_drain_offline(struct cgroup *cgrp) if (!css || !percpu_ref_is_dying(&css->refcnt)) continue; - cgroup_get(dsct); + cgroup_get_live(dsct); prepare_to_wait(&dsct->offline_waitq, &wait, TASK_UNINTERRUPTIBLE); @@ -3947,7 +3952,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css, { lockdep_assert_held(&cgroup_mutex); - cgroup_get(cgrp); + cgroup_get_live(cgrp); memset(css, 0, sizeof(*css)); css->cgroup = cgrp; @@ -4123,7 +4128,7 @@ static struct cgroup *cgroup_create(struct cgroup *parent) /* allocation complete, commit to creation */ list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children); atomic_inc(&root->nr_cgrps); - cgroup_get(parent); + cgroup_get_live(parent); /* * @cgrp is now fully operational. If something fails after this @@ -4947,7 +4952,7 @@ struct cgroup *cgroup_get_from_path(const char *path) if (kn) { if (kernfs_type(kn) == KERNFS_DIR) { cgrp = kn->priv; - cgroup_get(cgrp); + cgroup_get_live(cgrp); } else { cgrp = ERR_PTR(-ENOTDIR); } @@ -5027,6 +5032,11 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd) /* Socket clone path */ if (skcd->val) { + /* + * We might be cloning a socket which is left in an empty + * cgroup and the cgroup might have already been rmdir'd. + * Don't use cgroup_get_live(). + */ cgroup_get(sock_cgroup_ptr(skcd)); return; } diff --git a/kernel/padata.c b/kernel/padata.c index 3202aa17492c..f1aef1639204 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -354,7 +354,7 @@ static int padata_setup_cpumasks(struct parallel_data *pd, cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask); if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) { - free_cpumask_var(pd->cpumask.cbcpu); + free_cpumask_var(pd->cpumask.pcpu); return -ENOMEM; } diff --git a/mm/filemap.c b/mm/filemap.c index 1694623a6289..157c047b180a 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2719,18 +2719,16 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) * about to write. We do this *before* the write so that we can return * without clobbering -EIOCBQUEUED from ->direct_IO(). 
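The cgroup_get()/cgroup_get_live() split above is the heart of that patch: both take a reference, but only the _live variant insists the cgroup has not already been killed, and every internal caller except the socket-clone path is converted. The exception is deliberate, per the new comment: a cloned socket may legitimately still point at an rmdir'd cgroup. The resulting pair, as in the hunks:

static void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);		/* tolerates dead cgroups */
}

static void cgroup_get_live(struct cgroup *cgrp)
{
	WARN_ON_ONCE(cgroup_is_dead(cgrp));
	css_get(&cgrp->self);
}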
*/ - if (mapping->nrpages) { - written = invalidate_inode_pages2_range(mapping, + written = invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end); - /* - * If a page can not be invalidated, return 0 to fall back - * to buffered write. - */ - if (written) { - if (written == -EBUSY) - return 0; - goto out; - } + /* + * If a page can not be invalidated, return 0 to fall back + * to buffered write. + */ + if (written) { + if (written == -EBUSY) + return 0; + goto out; } data = *from; @@ -2744,10 +2742,8 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) * so we don't support it 100%. If this invalidation * fails, tough, the write still worked... */ - if (mapping->nrpages) { - invalidate_inode_pages2_range(mapping, - pos >> PAGE_SHIFT, end); - } + invalidate_inode_pages2_range(mapping, + pos >> PAGE_SHIFT, end); if (written > 0) { pos += written; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2bd7541d7c11..a4b8fc7aaf80 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -568,23 +568,6 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) * common workload, threshold and synchronization as vmstat[] should be * implemented. */ -static unsigned long -mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) -{ - long val = 0; - int cpu; - - /* Per-cpu values can be negative, use a signed accumulator */ - for_each_possible_cpu(cpu) - val += per_cpu(memcg->stat->count[idx], cpu); - /* - * Summing races with updates, so val may be negative. Avoid exposing - * transient negative values. - */ - if (val < 0) - val = 0; - return val; -} static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg, enum mem_cgroup_events_index idx) @@ -5237,6 +5220,13 @@ static int memory_stat_show(struct seq_file *m, void *v) seq_printf(m, "pgmajfault %lu\n", events[MEM_CGROUP_EVENTS_PGMAJFAULT]); + seq_printf(m, "workingset_refault %lu\n", + stat[MEMCG_WORKINGSET_REFAULT]); + seq_printf(m, "workingset_activate %lu\n", + stat[MEMCG_WORKINGSET_ACTIVATE]); + seq_printf(m, "workingset_nodereclaim %lu\n", + stat[MEMCG_WORKINGSET_NODERECLAIM]); + return 0; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 07efbc3a8656..c5fee5a0316d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3245,6 +3245,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, enum compact_priority prio, enum compact_result *compact_result) { struct page *page; + unsigned int noreclaim_flag = current->flags & PF_MEMALLOC; if (!order) return NULL; @@ -3252,7 +3253,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, current->flags |= PF_MEMALLOC; *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, prio); - current->flags &= ~PF_MEMALLOC; + current->flags = (current->flags & ~PF_MEMALLOC) | noreclaim_flag; if (*compact_result <= COMPACT_INACTIVE) return NULL; diff --git a/mm/truncate.c b/mm/truncate.c index 6263affdef88..5f1c4b65239e 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -67,17 +67,14 @@ static void truncate_exceptional_entry(struct address_space *mapping, /* * Invalidate exceptional entry if easily possible. This handles exceptional - * entries for invalidate_inode_pages() so for DAX it evicts only unlocked and - * clean entries. + * entries for invalidate_inode_pages(). */ static int invalidate_exceptional_entry(struct address_space *mapping, pgoff_t index, void *entry) { - /* Handled by shmem itself */ - if (shmem_mapping(mapping)) + /* Handled by shmem itself, or for DAX we do nothing. 
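The __alloc_pages_direct_compact() hunk above fixes a subtle flag-clobbering bug: the function set PF_MEMALLOC for the duration of compaction and then cleared it unconditionally, even when the caller had entered already holding PF_MEMALLOC. A userspace model of the save/restore (PF_MEMALLOC's value is copied from the era's sched.h):

#include <assert.h>

#define PF_MEMALLOC 0x00000800

int main(void)
{
	unsigned int flags = PF_MEMALLOC;	/* caller already in reclaim */
	unsigned int noreclaim_flag = flags & PF_MEMALLOC;	/* save */

	flags |= PF_MEMALLOC;			/* enter compaction */
	/* try_to_compact_pages() would run here */
	flags = (flags & ~PF_MEMALLOC) | noreclaim_flag;	/* restore */

	assert(flags & PF_MEMALLOC);		/* caller's bit survived */
	return 0;
}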
*/ + if (shmem_mapping(mapping) || dax_mapping(mapping)) return 1; - if (dax_mapping(mapping)) - return dax_invalidate_mapping_entry(mapping, index); clear_shadow_entry(mapping, index, entry); return 1; } @@ -686,6 +683,17 @@ int invalidate_inode_pages2_range(struct address_space *mapping, cond_resched(); index++; } + /* + * For DAX we invalidate page tables after invalidating radix tree. We + * could invalidate page tables while invalidating each entry however + * that would be expensive. And doing range unmapping before doesn't + * work as we have no cheap way to find whether radix tree entry didn't + * get remapped later. + */ + if (dax_mapping(mapping)) { + unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT, + (loff_t)(end - start + 1) << PAGE_SHIFT, 0); + } cleancache_invalidate_inode(mapping); return ret; } diff --git a/mm/vmscan.c b/mm/vmscan.c index bc8031ef994d..1345d5fba4a2 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2033,6 +2033,8 @@ static void shrink_active_list(unsigned long nr_to_scan, * Both inactive lists should also be large enough that each inactive * page has a chance to be referenced again before it is reclaimed. * + * If that fails and refaulting is observed, the inactive list grows. + * * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages * on this LRU, maintained by the pageout code. A zone->inactive_ratio * of 3 means 3:1 or 25% of the pages are kept on the inactive list. @@ -2049,12 +2051,15 @@ static void shrink_active_list(unsigned long nr_to_scan, * 10TB 320 32GB */ static bool inactive_list_is_low(struct lruvec *lruvec, bool file, - struct scan_control *sc, bool trace) + struct mem_cgroup *memcg, + struct scan_control *sc, bool actual_reclaim) { - unsigned long inactive_ratio; - unsigned long inactive, active; - enum lru_list inactive_lru = file * LRU_FILE; enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE; + struct pglist_data *pgdat = lruvec_pgdat(lruvec); + enum lru_list inactive_lru = file * LRU_FILE; + unsigned long inactive, active; + unsigned long inactive_ratio; + unsigned long refaults; unsigned long gb; /* @@ -2067,27 +2072,43 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file, inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx); active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx); - gb = (inactive + active) >> (30 - PAGE_SHIFT); - if (gb) - inactive_ratio = int_sqrt(10 * gb); + if (memcg) + refaults = mem_cgroup_read_stat(memcg, + MEMCG_WORKINGSET_ACTIVATE); else - inactive_ratio = 1; + refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE); + + /* + * When refaults are being observed, it means a new workingset + * is being established. Disable active list protection to get + * rid of the stale workingset quickly. 
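The invalidate_inode_pages2_range() hunk above adds a single range unmap after the whole radix tree walk rather than per entry, and the new comment explains why: per-entry unmapping would be expensive, and unmapping first is defeated by concurrent refaults. The page-index-to-byte-range conversion it performs is easy to sanity-check in userspace (loff_t modelled as long long):

#include <assert.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long start = 3, end = 7;	/* inclusive page indices */
	long long holebegin = (long long)start << PAGE_SHIFT;
	long long holelen = (long long)(end - start + 1) << PAGE_SHIFT;

	assert(holebegin == 3 * 4096);	/* first byte of page 3 */
	assert(holelen == 5 * 4096);	/* pages 3..7 inclusive */
	return 0;
}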
+ */ + if (file && actual_reclaim && lruvec->refaults != refaults) { + inactive_ratio = 0; + } else { + gb = (inactive + active) >> (30 - PAGE_SHIFT); + if (gb) + inactive_ratio = int_sqrt(10 * gb); + else + inactive_ratio = 1; + } - if (trace) - trace_mm_vmscan_inactive_list_is_low(lruvec_pgdat(lruvec)->node_id, - sc->reclaim_idx, - lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive, - lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active, - inactive_ratio, file); + if (actual_reclaim) + trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx, + lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive, + lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active, + inactive_ratio, file); return inactive * inactive_ratio < active; } static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, - struct lruvec *lruvec, struct scan_control *sc) + struct lruvec *lruvec, struct mem_cgroup *memcg, + struct scan_control *sc) { if (is_active_lru(lru)) { - if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true)) + if (inactive_list_is_low(lruvec, is_file_lru(lru), + memcg, sc, true)) shrink_active_list(nr_to_scan, lruvec, sc, lru); return 0; } @@ -2218,7 +2239,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, * lruvec even if it has plenty of old anonymous pages unless the * system is under heavy pressure. */ - if (!inactive_list_is_low(lruvec, true, sc, false) && + if (!inactive_list_is_low(lruvec, true, memcg, sc, false) && lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) { scan_balance = SCAN_FILE; goto out; @@ -2376,7 +2397,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc nr[lru] -= nr_to_scan; nr_reclaimed += shrink_list(lru, nr_to_scan, - lruvec, sc); + lruvec, memcg, sc); } } @@ -2443,7 +2464,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc * Even if we did not try to evict anon pages at all, we want to * rebalance the anon lru active/inactive ratio. */ - if (inactive_list_is_low(lruvec, false, sc, true)) + if (inactive_list_is_low(lruvec, false, memcg, sc, true)) shrink_active_list(SWAP_CLUSTER_MAX, lruvec, sc, LRU_ACTIVE_ANON); } @@ -2752,6 +2773,26 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc) sc->gfp_mask = orig_mask; } +static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat) +{ + struct mem_cgroup *memcg; + + memcg = mem_cgroup_iter(root_memcg, NULL, NULL); + do { + unsigned long refaults; + struct lruvec *lruvec; + + if (memcg) + refaults = mem_cgroup_read_stat(memcg, + MEMCG_WORKINGSET_ACTIVATE); + else + refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE); + + lruvec = mem_cgroup_lruvec(pgdat, memcg); + lruvec->refaults = refaults; + } while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL))); +} + /* * This is the main entry point to direct page reclaim. 
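When no refaults are being observed, inactive_list_is_low() falls back to the size-based heuristic whose table appears in the vmscan comment above. The computation reproduces that table exactly; here is a userspace check (isqrt() is a plain integer square root standing in for the kernel's int_sqrt()):

#include <stdio.h>

static unsigned long isqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	unsigned long gbs[] = { 1, 10, 100, 1024, 10240 };
	int i;

	for (i = 0; i < 5; i++) {
		unsigned long gb = gbs[i];
		unsigned long ratio = gb ? isqrt(10 * gb) : 1;

		/* prints 3, 10, 31, 101, 320 -- matching the table */
		printf("%6luGB -> inactive_ratio %lu\n", gb, ratio);
	}
	return 0;
}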
* @@ -2772,6 +2813,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, struct scan_control *sc) { int initial_priority = sc->priority; + pg_data_t *last_pgdat; + struct zoneref *z; + struct zone *zone; retry: delayacct_freepages_start(); @@ -2798,6 +2842,15 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, sc->may_writepage = 1; } while (--sc->priority >= 0); + last_pgdat = NULL; + for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, + sc->nodemask) { + if (zone->zone_pgdat == last_pgdat) + continue; + last_pgdat = zone->zone_pgdat; + snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); + } + delayacct_freepages_end(); if (sc->nr_reclaimed) @@ -3076,7 +3129,7 @@ static void age_active_anon(struct pglist_data *pgdat, do { struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg); - if (inactive_list_is_low(lruvec, false, sc, true)) + if (inactive_list_is_low(lruvec, false, memcg, sc, true)) shrink_active_list(SWAP_CLUSTER_MAX, lruvec, sc, LRU_ACTIVE_ANON); @@ -3311,6 +3364,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) } while (sc.priority >= 1); out: + snapshot_refaults(NULL, pgdat); /* * Return the order kswapd stopped reclaiming at as * prepare_kswapd_sleep() takes it into account. If another caller diff --git a/mm/workingset.c b/mm/workingset.c index eda05c71fa49..51c6f61d4cea 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -269,7 +269,6 @@ bool workingset_refault(void *shadow) lruvec = mem_cgroup_lruvec(pgdat, memcg); refault = atomic_long_read(&lruvec->inactive_age); active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES); - rcu_read_unlock(); /* * The unsigned subtraction here gives an accurate distance @@ -290,11 +289,15 @@ bool workingset_refault(void *shadow) refault_distance = (refault - eviction) & EVICTION_MASK; inc_node_state(pgdat, WORKINGSET_REFAULT); + mem_cgroup_inc_stat(memcg, MEMCG_WORKINGSET_REFAULT); if (refault_distance <= active_file) { inc_node_state(pgdat, WORKINGSET_ACTIVATE); + mem_cgroup_inc_stat(memcg, MEMCG_WORKINGSET_ACTIVATE); + rcu_read_unlock(); return true; } + rcu_read_unlock(); return false; } @@ -472,6 +475,8 @@ static enum lru_status shadow_lru_isolate(struct list_head *item, if (WARN_ON_ONCE(node->exceptional)) goto out_invalid; inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM); + mem_cgroup_inc_page_stat(virt_to_page(node), + MEMCG_WORKINGSET_NODERECLAIM); __radix_tree_delete_node(&mapping->page_tree, node, workingset_update_node, mapping); diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index f64d6566021f..638bf0e1a2e3 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -1680,7 +1680,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; - if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE)) + if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE| + MSG_CMSG_COMPAT)) return -EINVAL; if (len < 4 || len > HCI_MAX_FRAME_SIZE) diff --git a/net/core/datagram.c b/net/core/datagram.c index f4947e737f34..d797baa69e43 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -760,7 +760,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, if (msg_data_left(msg) < chunk) { if (__skb_checksum_complete(skb)) - goto csum_error; + return -EINVAL; if (skb_copy_datagram_msg(skb, hlen, msg, chunk)) goto fault; } else { @@ -768,15 +768,16 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, if 
(skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter, chunk, &csum)) goto fault; - if (csum_fold(csum)) - goto csum_error; + + if (csum_fold(csum)) { + iov_iter_revert(&msg->msg_iter, chunk); + return -EINVAL; + } + if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) netdev_rx_csum_fault(skb->dev); } return 0; -csum_error: - iov_iter_revert(&msg->msg_iter, chunk); - return -EINVAL; fault: return -EFAULT; } diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c new file mode 100644 index 000000000000..745b4b1b8b21 --- /dev/null +++ b/tools/perf/arch/s390/annotate/instructions.c @@ -0,0 +1,30 @@ +static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *name) +{ + struct ins_ops *ops = NULL; + + /* catch all kind of jumps */ + if (strchr(name, 'j') || + !strncmp(name, "bct", 3) || + !strncmp(name, "br", 2)) + ops = &jump_ops; + /* override call/returns */ + if (!strcmp(name, "bras") || + !strcmp(name, "brasl") || + !strcmp(name, "basr")) + ops = &call_ops; + if (!strcmp(name, "br")) + ops = &ret_ops; + + arch__associate_ins_ops(arch, name, ops); + return ops; +} + +static int s390__annotate_init(struct arch *arch) +{ + if (!arch->initialized) { + arch->initialized = true; + arch->associate_instruction_ops = s390__associate_ins_ops; + } + + return 0; +} diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 7aa57225cbf7..83c57a11dc5b 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -108,6 +108,7 @@ static int arch__associate_ins_ops(struct arch* arch, const char *name, struct i #include "arch/arm64/annotate/instructions.c" #include "arch/x86/annotate/instructions.c" #include "arch/powerpc/annotate/instructions.c" +#include "arch/s390/annotate/instructions.c" static struct arch architectures[] = { { @@ -132,6 +133,13 @@ static struct arch architectures[] = { }, { .name = "s390", + .init = s390__annotate_init, + .objdump = { + .comment_char = '#', + }, + }, + { + .name = "s390", .objdump = { .comment_char = '#', }, diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index c5a6e0b12452..78bd632f144d 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -1826,7 +1826,7 @@ static int addr_filter__resolve_kernel_syms(struct addr_filter *filt) filt->addr = start; if (filt->range && !filt->size && !filt->sym_to) { filt->size = size; - no_size = !!size; + no_size = !size; } } @@ -1840,7 +1840,7 @@ static int addr_filter__resolve_kernel_syms(struct addr_filter *filt) if (err) return err; filt->size = start + size - filt->addr; - no_size = !!size; + no_size = !size; } /* The very last symbol in kallsyms does not imply a particular size */ diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c index f6121612e769..b9a22f18566a 100644 --- a/tools/testing/selftests/x86/ldt_gdt.c +++ b/tools/testing/selftests/x86/ldt_gdt.c @@ -409,6 +409,51 @@ static void *threadproc(void *ctx) } } +#ifdef __i386__ + +#ifndef SA_RESTORE +#define SA_RESTORER 0x04000000 +#endif + +/* + * The UAPI header calls this 'struct sigaction', which conflicts with + * glibc. Sigh. 
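The datagram.c change above is about not leaving the user's iov half-consumed: data is copied before the checksum can be verified, so on a mismatch the iterator must be rewound by exactly the amount copied. The full-chunk failure path now reads (condensed from the hunk, with the goto replaced by a direct return for clarity):

if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter, chunk, &csum))
	return -EFAULT;				/* fault: nothing sane to revert */

if (csum_fold(csum)) {
	iov_iter_revert(&msg->msg_iter, chunk);	/* undo the copy */
	return -EINVAL;				/* caller retries the datagram */
}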
+ */ +struct fake_ksigaction { + void *handler; /* the real type is nasty */ + unsigned long sa_flags; + void (*sa_restorer)(void); + unsigned char sigset[8]; +}; + +static void fix_sa_restorer(int sig) +{ + struct fake_ksigaction ksa; + + if (syscall(SYS_rt_sigaction, sig, NULL, &ksa, 8) == 0) { + /* + * glibc has a nasty bug: it sometimes writes garbage to + * sa_restorer. This interacts quite badly with anything + * that fiddles with SS because it can trigger legacy + * stack switching. Patch it up. See: + * + * https://sourceware.org/bugzilla/show_bug.cgi?id=21269 + */ + if (!(ksa.sa_flags & SA_RESTORER) && ksa.sa_restorer) { + ksa.sa_restorer = NULL; + if (syscall(SYS_rt_sigaction, sig, &ksa, NULL, + sizeof(ksa.sigset)) != 0) + err(1, "rt_sigaction"); + } + } +} +#else +static void fix_sa_restorer(int sig) +{ + /* 64-bit glibc works fine. */ +} +#endif + static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), int flags) { @@ -420,6 +465,7 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), if (sigaction(sig, &sa, 0)) err(1, "sigaction"); + fix_sa_restorer(sig); } static jmp_buf jmpbuf; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 88257b311cb5..7e80f62f034c 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1973,18 +1973,18 @@ static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, return 0; } -int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc, +int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len) { - struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); + struct kvm_memslots *slots = kvm_memslots(kvm); return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); } -EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva_cache_init); +EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); -int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc, - void *data, int offset, unsigned long len) +int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, int offset, unsigned long len) { - struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); + struct kvm_memslots *slots = kvm_memslots(kvm); int r; gpa_t gpa = ghc->gpa + offset; @@ -1994,7 +1994,7 @@ int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_ __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len); if (unlikely(!ghc->memslot)) - return kvm_vcpu_write_guest(vcpu, gpa, data, len); + return kvm_write_guest(kvm, gpa, data, len); if (kvm_is_error_hva(ghc->hva)) return -EFAULT; @@ -2006,19 +2006,19 @@ int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_ return 0; } -EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_offset_cached); +EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); -int kvm_vcpu_write_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc, - void *data, unsigned long len) +int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, unsigned long len) { - return kvm_vcpu_write_guest_offset_cached(vcpu, ghc, data, 0, len); + return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); } -EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_cached); +EXPORT_SYMBOL_GPL(kvm_write_guest_cached); -int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc, - void *data, unsigned long len) +int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, unsigned long len) { - struct kvm_memslots 
*slots = kvm_vcpu_memslots(vcpu); + struct kvm_memslots *slots = kvm_memslots(kvm); int r; BUG_ON(len > ghc->len); @@ -2027,7 +2027,7 @@ int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *g __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len); if (unlikely(!ghc->memslot)) - return kvm_vcpu_read_guest(vcpu, ghc->gpa, data, len); + return kvm_read_guest(kvm, ghc->gpa, data, len); if (kvm_is_error_hva(ghc->hva)) return -EFAULT; @@ -2038,7 +2038,7 @@ int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *g return 0; } -EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_cached); +EXPORT_SYMBOL_GPL(kvm_read_guest_cached); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) {
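The rename from the kvm_vcpu_* cached accessors back to kvm-scoped ones changes every signature above from taking a struct kvm_vcpu to taking a struct kvm, with memslots looked up via kvm_memslots() rather than the vcpu variant. A hedged usage sketch of the resulting API (kvm, gpa, and val are illustrative locals; arch code such as the x86 steal-time path is the real consumer):

struct gfn_to_hva_cache ghc;
u64 val = 0;

if (!kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
	(void)kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));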