diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 9fa2bf8c3f6f..7830f1c34a7d 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -1955,6 +1955,7 @@ registers, find a list below: PPC | KVM_REG_PPC_TM_VSCR | 32 PPC | KVM_REG_PPC_TM_DSCR | 64 PPC | KVM_REG_PPC_TM_TAR | 64 + PPC | KVM_REG_PPC_TM_XER | 64 | | MIPS | KVM_REG_MIPS_R0 | 64 ... diff --git a/Makefile b/Makefile index df72b644f78c..1aba5352e0bd 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 1 -SUBLEVEL = 37 +SUBLEVEL = 38 EXTRAVERSION = NAME = Series 4800 diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 7d0f07020c80..ab19044815f5 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -218,8 +218,7 @@ static int __init xen_guest_init(void) * for secondary CPUs as they are brought up. * For uniformity we use VCPUOP_register_vcpu_info even on cpu0. */ - xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info), - sizeof(struct vcpu_info)); + xen_vcpu_info = alloc_percpu(struct vcpu_info); if (xen_vcpu_info == NULL) return -ENOMEM; diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S index b6fcbaf5027b..3dc44b05fb97 100644 --- a/arch/powerpc/boot/ps3-head.S +++ b/arch/powerpc/boot/ps3-head.S @@ -57,11 +57,6 @@ __system_reset_overlay: bctr 1: - /* Save the value at addr zero for a null pointer write check later. */ - - li r4, 0 - lwz r3, 0(r4) - /* Primary delays then goes to _zimage_start in wrapper. */ or 31, 31, 31 /* db16cyc */ diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c index 4ec2d86d3c50..a05558a7e51a 100644 --- a/arch/powerpc/boot/ps3.c +++ b/arch/powerpc/boot/ps3.c @@ -119,13 +119,12 @@ void ps3_copy_vectors(void) flush_cache((void *)0x100, 512); } -void platform_init(unsigned long null_check) +void platform_init(void) { const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */ void *chosen; unsigned long ft_addr; u64 rm_size; - unsigned long val; console_ops.write = ps3_console_write; platform_ops.exit = ps3_exit; @@ -153,11 +152,6 @@ void platform_init(unsigned long null_check) printf(" flat tree at 0x%lx\n\r", ft_addr); - val = *(unsigned long *)0; - - if (val != null_check) - printf("null check failed: %lx != %lx\n\r", val, null_check); - ((kernel_entry_t)0)(ft_addr, 0, NULL); ps3_exit(); diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index a193a13cf08b..7fe65af0035d 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -532,6 +532,7 @@ struct kvm_vcpu_arch { u64 tfiar; u32 cr_tm; + u64 xer_tm; u64 lr_tm; u64 ctr_tm; u64 amr_tm; diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index ab4d4732c492..720b71a636c8 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -587,6 +587,7 @@ struct kvm_get_htab_header { #define KVM_REG_PPC_TM_VSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67) #define KVM_REG_PPC_TM_DSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68) #define KVM_REG_PPC_TM_TAR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69) +#define KVM_REG_PPC_TM_XER (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a) /* PPC64 eXternal Interrupt Controller Specification */ #define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 0034b6b3556a..d8d332e65078 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ 
b/arch/powerpc/kernel/asm-offsets.c @@ -583,6 +583,7 @@ int main(void) DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr)); DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm)); DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm)); + DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm)); DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm)); DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm)); DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm)); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index f5b3de7f7fa2..63c37fd2b7a6 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1171,6 +1171,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_TM_CR: *val = get_reg_val(id, vcpu->arch.cr_tm); break; + case KVM_REG_PPC_TM_XER: + *val = get_reg_val(id, vcpu->arch.xer_tm); + break; case KVM_REG_PPC_TM_LR: *val = get_reg_val(id, vcpu->arch.lr_tm); break; @@ -1378,6 +1381,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_TM_CR: vcpu->arch.cr_tm = set_reg_val(id, *val); break; + case KVM_REG_PPC_TM_XER: + vcpu->arch.xer_tm = set_reg_val(id, *val); + break; case KVM_REG_PPC_TM_LR: vcpu->arch.lr_tm = set_reg_val(id, *val); break; diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index c6d601cc9764..bee16411de73 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -619,6 +619,8 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, HPTE_V_ABSENT); do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true); + /* Don't lose R/C bit updates done by hardware */ + r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C); hpte[1] = cpu_to_be64(r); } } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 11d82b91aa4f..70eaf547703e 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -2399,11 +2399,13 @@ kvmppc_save_tm: mfctr r7 mfspr r8, SPRN_AMR mfspr r10, SPRN_TAR + mfxer r11 std r5, VCPU_LR_TM(r9) stw r6, VCPU_CR_TM(r9) std r7, VCPU_CTR_TM(r9) std r8, VCPU_AMR_TM(r9) std r10, VCPU_TAR_TM(r9) + std r11, VCPU_XER_TM(r9) /* Restore r12 as trap number. 
*/ lwz r12, VCPU_TRAP(r9) @@ -2496,11 +2498,13 @@ kvmppc_restore_tm: ld r7, VCPU_CTR_TM(r4) ld r8, VCPU_AMR_TM(r4) ld r9, VCPU_TAR_TM(r4) + ld r10, VCPU_XER_TM(r4) mtlr r5 mtcr r6 mtctr r7 mtspr SPRN_AMR, r8 mtspr SPRN_TAR, r9 + mtxer r10 /* * Load up PPR and DSCR values but don't put them in the actual SPRs diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 08f9d9230b94..341ea55d2e85 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1172,10 +1172,10 @@ static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12) return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR; } -static inline bool is_exception(u32 intr_info) +static inline bool is_nmi(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) - == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK); + == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK); } static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, @@ -5089,7 +5089,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) if (is_machine_check(intr_info)) return handle_machine_check(vcpu); - if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR) + if (is_nmi(intr_info)) return 1; /* already handled by vmx_vcpu_run() */ if (is_no_device(intr_info)) { @@ -7519,7 +7519,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) switch (exit_reason) { case EXIT_REASON_EXCEPTION_NMI: - if (!is_exception(intr_info)) + if (is_nmi(intr_info)) return false; else if (is_page_fault(intr_info)) return enable_ept; @@ -7967,8 +7967,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) kvm_machine_check(); /* We need to handle NMIs before interrupts are enabled */ - if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR && - (exit_intr_info & INTR_INFO_VALID_MASK)) { + if (is_nmi(exit_intr_info)) { kvm_before_handle_nmi(&vmx->vcpu); asm("int $2"); kvm_after_handle_nmi(&vmx->vcpu); diff --git a/block/bsg.c b/block/bsg.c index d214e929ce18..b9a53615bdef 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -655,6 +655,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) dprintk("%s: write %Zd bytes\n", bd->name, count); + if (unlikely(segment_eq(get_fs(), KERNEL_DS))) + return -EINVAL; + bsg_set_block(bd, file); bytes_written = 0; diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 4d1d9de4f9bf..8b0b95014465 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -923,13 +923,14 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, timeout = MAX_JIFFY_OFFSET; } - retval = wait_for_completion_interruptible_timeout(&buf->completion, + timeout = wait_for_completion_interruptible_timeout(&buf->completion, timeout); - if (retval == -ERESTARTSYS || !retval) { + if (timeout == -ERESTARTSYS || !timeout) { + retval = timeout; mutex_lock(&fw_lock); fw_load_abort(fw_priv); mutex_unlock(&fw_lock); - } else if (retval > 0) { + } else if (timeout > 0) { retval = 0; } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index cef6fa83a274..ea0c863861b9 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1436,7 +1436,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx, blk_mq_start_request(bd->rq); if (lo->lo_state != Lo_bound) - return -EIO; + return BLK_MQ_RQ_QUEUE_ERROR; if (cmd->rq->cmd_flags & REQ_WRITE) { struct loop_device *lo = cmd->rq->q->queuedata; diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c index 3111f2778079..849f2e29c243 100644 --- 
a/drivers/char/tpm/xen-tpmfront.c +++ b/drivers/char/tpm/xen-tpmfront.c @@ -305,7 +305,6 @@ static int tpmfront_probe(struct xenbus_device *dev, rv = setup_ring(dev, priv); if (rv) { chip = dev_get_drvdata(&dev->dev); - tpm_chip_unregister(chip); ring_free(priv); return rv; } diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 29071a156cbe..c9202f6feda8 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -703,7 +703,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* Will read cryptlen */ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); - aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); /* Write ICV */ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index fd5c5f3370f6..e53dbc90fcb6 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -223,7 +223,8 @@ static int ast_get_dram_info(struct drm_device *dev) ast_write32(ast, 0x10000, 0xfc600309); do { - ; + if (pci_channel_offline(dev->pdev)) + return -EIO; } while (ast_read32(ast, 0x10000) != 0x01); data = ast_read32(ast, 0x10004); @@ -429,7 +430,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags) ast_detect_chip(dev, &need_post); if (ast->chip != AST1180) { - ast_get_dram_info(dev); + ret = ast_get_dram_info(dev); + if (ret) + goto out_free; ast->vram_size = ast_get_vram_info(dev); DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size); } diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 92e7e5795398..db98ab5cde3d 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -484,6 +484,9 @@ static const struct file_operations psb_gem_fops = { .open = drm_open, .release = drm_release, .unlocked_ioctl = psb_unlocked_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .mmap = drm_gem_mmap, .poll = drm_poll, .read = drm_read, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7d53d7e15455..c7a7cc17db32 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2006,21 +2006,19 @@ struct drm_i915_gem_object { /** Record of address bit 17 of each page at last unbind. 
*/ unsigned long *bit_17; - union { - /** for phy allocated objects */ - struct drm_dma_handle *phys_handle; - - struct i915_gem_userptr { - uintptr_t ptr; - unsigned read_only :1; - unsigned workers :4; + struct i915_gem_userptr { + uintptr_t ptr; + unsigned read_only :1; + unsigned workers :4; #define I915_GEM_USERPTR_MAX_WORKERS 15 - struct i915_mm_struct *mm; - struct i915_mmu_object *mmu_object; - struct work_struct *work; - } userptr; - }; + struct i915_mm_struct *mm; + struct i915_mmu_object *mmu_object; + struct work_struct *work; + } userptr; + + /** for phys allocated objects */ + struct drm_dma_handle *phys_handle; }; #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 00bc49835e09..52c703be7882 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1849,32 +1849,34 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8]) GEN9_MEM_LATENCY_LEVEL_MASK; /* + * If a level n (n > 1) has a 0us latency, all levels m (m >= n) + * need to be disabled. We make sure to sanitize the values out + * of the punit to satisfy this requirement. + */ + for (level = 1; level <= max_level; level++) { + if (wm[level] == 0) { + for (i = level + 1; i <= max_level; i++) + wm[i] = 0; + break; + } + } + + /* * WaWmMemoryReadLatency:skl * * punit doesn't take into account the read latency so we need - * to add 2us to the various latency levels we retrieve from - * the punit. - * - W0 is a bit special in that it's the only level that - * can't be disabled if we want to have display working, so - * we always add 2us there. - * - For levels >=1, punit returns 0us latency when they are - * disabled, so we respect that and don't add 2us then - * - * Additionally, if a level n (n > 1) has a 0us latency, all - * levels m (m >= n) need to be disabled. We make sure to - * sanitize the values out of the punit to satisfy this - * requirement. + * to add 2us to the various latency levels we retrieve from the + * punit when level 0 response data us 0us. 
*/ - wm[0] += 2; - for (level = 1; level <= max_level; level++) - if (wm[level] != 0) + if (wm[0] == 0) { + wm[0] += 2; + for (level = 1; level <= max_level; level++) { + if (wm[level] == 0) + break; wm[level] += 2; - else { - for (i = level + 1; i <= max_level; i++) - wm[i] = 0; - - break; } + } + } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { uint64_t sskpd = I915_READ64(MCH_SSKPD); diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index fa661744a1f5..cba23008eca4 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -90,6 +90,9 @@ static void radeon_show_cursor(struct drm_crtc *crtc) struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev = crtc->dev->dev_private; + if (radeon_crtc->cursor_out_of_bounds) + return; + if (ASIC_IS_DCE4(rdev)) { WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN | @@ -124,21 +127,25 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y) int xorigin = 0, yorigin = 0; int w = radeon_crtc->cursor_width; + radeon_crtc->cursor_x = x; + radeon_crtc->cursor_y = y; + if (ASIC_IS_AVIVO(rdev)) { /* avivo cursor are offset into the total surface */ x += crtc->x; y += crtc->y; } - DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); - if (x < 0) { + if (x < 0) xorigin = min(-x, radeon_crtc->max_cursor_width - 1); - x = 0; - } - if (y < 0) { + if (y < 0) yorigin = min(-y, radeon_crtc->max_cursor_height - 1); - y = 0; + + if (!ASIC_IS_AVIVO(rdev)) { + x += crtc->x; + y += crtc->y; } + DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); /* fixed on DCE6 and newer */ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) { @@ -161,27 +168,31 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y) if (i > 1) { int cursor_end, frame_end; - cursor_end = x - xorigin + w; + cursor_end = x + w; frame_end = crtc->x + crtc->mode.crtc_hdisplay; if (cursor_end >= frame_end) { w = w - (cursor_end - frame_end); if (!(frame_end & 0x7f)) w--; - } else { - if (!(cursor_end & 0x7f)) - w--; + } else if (cursor_end <= 0) { + goto out_of_bounds; + } else if (!(cursor_end & 0x7f)) { + w--; } if (w <= 0) { - w = 1; - cursor_end = x - xorigin + w; - if (!(cursor_end & 0x7f)) { - x--; - WARN_ON_ONCE(x < 0); - } + goto out_of_bounds; } } } + if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) || + x >= (crtc->x + crtc->mode.crtc_hdisplay) || + y >= (crtc->y + crtc->mode.crtc_vdisplay)) + goto out_of_bounds; + + x += xorigin; + y += yorigin; + if (ASIC_IS_DCE4(rdev)) { WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); @@ -193,6 +204,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y) WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); } else { + x -= crtc->x; + y -= crtc->y; + if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN) y *= 2; @@ -210,10 +224,20 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y) yorigin * 256); } - radeon_crtc->cursor_x = x; - radeon_crtc->cursor_y = y; + if (radeon_crtc->cursor_out_of_bounds) { + radeon_crtc->cursor_out_of_bounds = false; + if (radeon_crtc->cursor_bo) + radeon_show_cursor(crtc); + } return 0; + + out_of_bounds: + if (!radeon_crtc->cursor_out_of_bounds) { + radeon_hide_cursor(crtc); + 
radeon_crtc->cursor_out_of_bounds = true; + } + return 0; } int radeon_crtc_cursor_move(struct drm_crtc *crtc, @@ -306,22 +330,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc, return ret; } - radeon_crtc->cursor_width = width; - radeon_crtc->cursor_height = height; - radeon_lock_cursor(crtc, true); - if (hot_x != radeon_crtc->cursor_hot_x || + if (width != radeon_crtc->cursor_width || + height != radeon_crtc->cursor_height || + hot_x != radeon_crtc->cursor_hot_x || hot_y != radeon_crtc->cursor_hot_y) { int x, y; x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x; y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y; - radeon_cursor_move_locked(crtc, x, y); - + radeon_crtc->cursor_width = width; + radeon_crtc->cursor_height = height; radeon_crtc->cursor_hot_x = hot_x; radeon_crtc->cursor_hot_y = hot_y; + + radeon_cursor_move_locked(crtc, x, y); } radeon_set_cursor(crtc); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 43ba333949c7..3974e03bd82f 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -330,6 +330,7 @@ struct radeon_crtc { u16 lut_r[256], lut_g[256], lut_b[256]; bool enabled; bool can_tile; + bool cursor_out_of_bounds; uint32_t crtc_offset; struct drm_gem_object *cursor_bo; uint64_t cursor_addr; diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index a808ba001ee7..a717da729fb8 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2975,6 +2975,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, (rdev->pdev->revision == 0x80) || (rdev->pdev->revision == 0x81) || (rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0x87) || (rdev->pdev->device == 0x6604) || (rdev->pdev->device == 0x6605)) { max_sclk = 75000; diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index fa17b552ff78..aab811d4c0e1 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -526,8 +526,11 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec, process_join_error(group, status); else { int mgids_changed, is_mgid0; - ib_find_pkey(group->port->dev->device, group->port->port_num, - be16_to_cpu(rec->pkey), &pkey_index); + + if (ib_find_pkey(group->port->dev->device, + group->port->port_num, be16_to_cpu(rec->pkey), + &pkey_index)) + pkey_index = MCAST_INVALID_PKEY_INDEX; spin_lock_irq(&group->port->lock); if (group->state == MCAST_BUSY && diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 0d23e0568deb..b9705395b0aa 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -522,8 +522,11 @@ void ipoib_mcast_join_task(struct work_struct *work) if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) return; - if (ib_query_port(priv->ca, priv->port, &port_attr) || - port_attr.state != IB_PORT_ACTIVE) { + if (ib_query_port(priv->ca, priv->port, &port_attr)) { + ipoib_dbg(priv, "ib_query_port() failed\n"); + return; + } + if (port_attr.state != IB_PORT_ACTIVE) { ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n", port_attr.state); return; diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c index 599578042ea0..009f75d25268 100644 --- a/drivers/input/misc/drv260x.c +++ b/drivers/input/misc/drv260x.c @@ -597,7 +597,6 @@ static int drv260x_probe(struct i2c_client *client, 
} haptics->input_dev->name = "drv260x:haptics"; - haptics->input_dev->dev.parent = client->dev.parent; haptics->input_dev->close = drv260x_close; input_set_drvdata(haptics->input_dev, haptics); input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index ce507a405d05..ab16f33b743b 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1482,12 +1482,15 @@ static int crypt_set_key(struct crypt_config *cc, char *key) if (!cc->key_size && strcmp(key, "-")) goto out; + /* clear the flag since following operations may invalidate previously valid key */ + clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); + if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0) goto out; - set_bit(DM_CRYPT_KEY_VALID, &cc->flags); - r = crypt_setkey_allcpus(cc); + if (!r) + set_bit(DM_CRYPT_KEY_VALID, &cc->flags); out: /* Hex key string not needed after here, so wipe it. */ diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index 53091295fce9..4ca451e679a3 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -766,17 +766,15 @@ int dm_sm_metadata_create(struct dm_space_map *sm, memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); r = sm_ll_new_metadata(&smm->ll, tm); + if (!r) { + if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS) + nr_blocks = DM_SM_METADATA_MAX_BLOCKS; + r = sm_ll_extend(&smm->ll, nr_blocks); + } + memcpy(&smm->sm, &ops, sizeof(smm->sm)); if (r) return r; - if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS) - nr_blocks = DM_SM_METADATA_MAX_BLOCKS; - r = sm_ll_extend(&smm->ll, nr_blocks); - if (r) - return r; - - memcpy(&smm->sm, &ops, sizeof(smm->sm)); - /* * Now we need to update the newly created data structures with the * allocated blocks that they were built from. 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ef0a99a3a779..e6d689c0a175 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6918,6 +6918,15 @@ static int run(struct mddev *mddev) stripe = (stripe | (stripe-1)) + 1; mddev->queue->limits.discard_alignment = stripe; mddev->queue->limits.discard_granularity = stripe; + + /* + * We use 16-bit counter of active stripes in bi_phys_segments + * (minus one for over-loaded initialization) + */ + blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS); + blk_queue_max_discard_sectors(mddev->queue, + 0xfffe * STRIPE_SECTORS); + /* * unaligned part of discard request will be ignored, so can't * guarantee discard_zeroes_data diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 5eb23ae82def..fdc44c8200ba 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2055,7 +2055,27 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) ctrl &= ~SDHCI_CTRL_EXEC_TUNING; sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); + err = -EIO; + + if (cmd.opcode != MMC_SEND_TUNING_BLOCK_HS200) + goto out; + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + + spin_unlock_irqrestore(&host->lock, flags); + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = MMC_STOP_TRANSMISSION; + cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; + cmd.busy_timeout = 50; + mmc_wait_for_cmd(mmc, &cmd, 0); + + spin_lock_irqsave(&host->lock, flags); + goto out; } diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 7cdaf40c3057..ea7b8c25955f 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -27,7 +27,6 @@ static const struct pci_device_id ath_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */ { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */ { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */ - { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */ #ifdef CONFIG_ATH9K_PCOEM /* Mini PCI AR9220 MB92 cards: Compex WLM200NX, Wistron DNMA-92 */ @@ -38,7 +37,7 @@ static const struct pci_device_id ath_pci_id_table[] = { .driver_data = ATH9K_PCI_LED_ACT_HI }, #endif - { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */ + { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */ #ifdef CONFIG_ATH9K_PCOEM { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, @@ -86,7 +85,11 @@ static const struct pci_device_id ath_pci_id_table[] = { 0x10CF, /* Fujitsu */ 0x1536), .driver_data = ATH9K_PCI_D3_L1_WAR }, +#endif + { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */ + +#ifdef CONFIG_ATH9K_PCOEM /* AR9285 card for Asus */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 0x002B, diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 4e720ed402ef..66c12c8f968b 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1993,6 +1993,10 @@ bool pci_dev_run_wake(struct pci_dev *dev) if (!dev->pme_support) return false; + /* PME-capable in principle, but not from the intended sleep state */ + if (!pci_pme_capable(dev, pci_target_state(dev))) + return false; + while (bus->parent) { struct pci_dev *bridge = bus->self; diff --git a/drivers/regulator/stw481x-vmmc.c b/drivers/regulator/stw481x-vmmc.c index 7d2ae3e9e942..342f5da79975 100644 --- a/drivers/regulator/stw481x-vmmc.c +++ b/drivers/regulator/stw481x-vmmc.c @@ -47,7 +47,8 @@ static struct regulator_desc vmmc_regulator = { .volt_table = stw481x_vmmc_voltages, .enable_time = 200, /* FIXME: look this up */ .enable_reg = 
STW_CONF1, - .enable_mask = STW_CONF1_PDN_VMMC, + .enable_mask = STW_CONF1_PDN_VMMC | STW_CONF1_MMC_LS_STATUS, + .enable_val = STW_CONF1_PDN_VMMC, .vsel_reg = STW_CONF1, .vsel_mask = STW_CONF1_VMMC_MASK, }; diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 9bb48d70957c..4d20f7298cb8 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -872,7 +872,7 @@ static int __init vmlogrdr_init(void) goto cleanup; for (i=0; i < MAXMINOR; ++i ) { - sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL); + sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sys_ser[i].buffer) { rc = -ENOMEM; break; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 5a0800d19970..bd6f71b97710 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2490,6 +2490,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, printk("megaraid_sas: pending commands remain after waiting, " "will reset adapter scsi%d.\n", instance->host->host_no); + *convert = 1; retval = 1; } out: diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 9ad41168d26d..72699ac0a0c4 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1005,10 +1005,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) struct request_queue *rq = sdev->request_queue; struct scsi_target *starget = sdev->sdev_target; - error = scsi_device_set_state(sdev, SDEV_RUNNING); - if (error) - return error; - error = scsi_target_add(starget); if (error) return error; diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index ec192939750e..26bc4e9c7441 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -592,6 +592,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; + if (unlikely(segment_eq(get_fs(), KERNEL_DS))) + return -EINVAL; + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index 0f28c08fcb3c..77b551da5728 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c @@ -909,6 +909,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, if (err) { ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n", err); + goto out_free; } else { ssb_dbg("Using SPROM revision %d provided by platform\n", sprom->revision); diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index 4b8da862cd7e..f7bcefd46b5e 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -2079,7 +2079,7 @@ static int ni_ai_insn_read(struct comedi_device *dev, unsigned int *data) { struct ni_private *devpriv = dev->private; - unsigned int mask = (s->maxdata + 1) >> 1; + unsigned int mask = s->maxdata; int i, n; unsigned signbits; unsigned int d; diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 07d2996d8c1f..39e8f22be68b 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -605,8 +605,6 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION); cmd->se_cmd = NULL; - kmem_cache_free(tcmu_cmd_cache, cmd); - return 0; } diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c 
index 1967bee4f076..9035fbc5e98d 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c @@ -98,7 +98,7 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf) long temperature; int ret; - ret = tz->ops->get_trip_temp(tz, 0, &temperature); + ret = tz->ops->get_crit_temp(tz, &temperature); if (ret) return ret; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 41dcefe67b43..84532dc93801 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1720,6 +1720,7 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */ .driver_info = QUIRK_CONTROL_LINE_STATE, }, { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */ + { USB_DEVICE(0x2184, 0x0036) }, /* GW Instek AFG-125 */ { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */ }, /* Motorola H24 HSPA module: */ diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index c98f78b0bf11..7602eceb5403 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -101,6 +101,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); static void hub_release(struct kref *kref); static int usb_reset_and_verify_device(struct usb_device *udev); +static void hub_usb3_port_prepare_disable(struct usb_hub *hub, + struct usb_port *port_dev); static inline char *portspeed(struct usb_hub *hub, int portstatus) { @@ -882,82 +884,28 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1, } /* - * If USB 3.0 ports are placed into the Disabled state, they will no longer - * detect any device connects or disconnects. This is generally not what the - * USB core wants, since it expects a disabled port to produce a port status - * change event when a new device connects. - * - * Instead, set the link state to Disabled, wait for the link to settle into - * that state, clear any change bits, and then put the port into the RxDetect - * state. + * USB-3 does not have a similar link state as USB-2 that will avoid negotiating + * a connection with a plugged-in cable but will signal the host when the cable + * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices */ -static int hub_usb3_port_disable(struct usb_hub *hub, int port1) -{ - int ret; - int total_time; - u16 portchange, portstatus; - - if (!hub_is_superspeed(hub->hdev)) - return -EINVAL; - - ret = hub_port_status(hub, port1, &portstatus, &portchange); - if (ret < 0) - return ret; - - /* - * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI - * Controller [1022:7814] will have spurious result making the following - * usb 3.0 device hotplugging route to the 2.0 root hub and recognized - * as high-speed device if we set the usb 3.0 port link state to - * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we - * check the state here to avoid the bug. - */ - if ((portstatus & USB_PORT_STAT_LINK_STATE) == - USB_SS_PORT_LS_RX_DETECT) { - dev_dbg(&hub->ports[port1 - 1]->dev, - "Not disabling port; link state is RxDetect\n"); - return ret; - } - - ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); - if (ret) - return ret; - - /* Wait for the link to enter the disabled state. 
*/ - for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) { - ret = hub_port_status(hub, port1, &portstatus, &portchange); - if (ret < 0) - return ret; - - if ((portstatus & USB_PORT_STAT_LINK_STATE) == - USB_SS_PORT_LS_SS_DISABLED) - break; - if (total_time >= HUB_DEBOUNCE_TIMEOUT) - break; - msleep(HUB_DEBOUNCE_STEP); - } - if (total_time >= HUB_DEBOUNCE_TIMEOUT) - dev_warn(&hub->ports[port1 - 1]->dev, - "Could not disable after %d ms\n", total_time); - - return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT); -} - static int hub_port_disable(struct usb_hub *hub, int port1, int set_state) { struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *hdev = hub->hdev; int ret = 0; - if (port_dev->child && set_state) - usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED); if (!hub->error) { - if (hub_is_superspeed(hub->hdev)) - ret = hub_usb3_port_disable(hub, port1); - else + if (hub_is_superspeed(hub->hdev)) { + hub_usb3_port_prepare_disable(hub, port_dev); + ret = hub_set_port_link_state(hub, port_dev->portnum, + USB_SS_PORT_LS_U3); + } else { ret = usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE); + } } + if (port_dev->child && set_state) + usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED); if (ret && ret != -ENODEV) dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret); return ret; @@ -4036,6 +3984,26 @@ void usb_unlocked_enable_lpm(struct usb_device *udev) } EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); +/* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */ +static void hub_usb3_port_prepare_disable(struct usb_hub *hub, + struct usb_port *port_dev) +{ + struct usb_device *udev = port_dev->child; + int ret; + + if (udev && udev->port_is_suspended && udev->do_remote_wakeup) { + ret = hub_set_port_link_state(hub, port_dev->portnum, + USB_SS_PORT_LS_U0); + if (!ret) { + msleep(USB_RESUME_TIMEOUT); + ret = usb_disable_remote_wakeup(udev); + } + if (ret) + dev_warn(&udev->dev, + "Port disable: can't disable remote wake\n"); + udev->do_remote_wakeup = 0; + } +} #else /* CONFIG_PM */ @@ -4043,6 +4011,9 @@ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); #define hub_resume NULL #define hub_reset_resume NULL +static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub, + struct usb_port *port_dev) { } + int usb_disable_lpm(struct usb_device *udev) { return 0; diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 58b4657fc721..9cd76cc8c0d9 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -143,7 +143,7 @@ int config_ep_by_speed(struct usb_gadget *g, ep_found: /* commit results */ - _ep->maxpacket = usb_endpoint_maxp(chosen_desc); + _ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff; _ep->desc = chosen_desc; _ep->comp_desc = NULL; _ep->maxburst = 0; diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c index 940304c33224..02260cfdedb1 100644 --- a/drivers/usb/host/uhci-pci.c +++ b/drivers/usb/host/uhci-pci.c @@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd) if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP) uhci->wait_for_hp = 1; + /* Intel controllers use non-PME wakeup signalling */ + if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL) + device_set_run_wake(uhci_dev(uhci), 1); + /* Set up pointers to PCI-specific functions */ uhci->reset_hc = uhci_pci_reset_hc; uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc; diff --git a/drivers/usb/serial/kl5kusb105.c 
b/drivers/usb/serial/kl5kusb105.c index e020ad28a00c..53c90131764d 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c @@ -296,7 +296,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) rc = usb_serial_generic_open(tty, port); if (rc) { retval = rc; - goto exit; + goto err_free_cfg; } rc = usb_control_msg(port->serial->dev, @@ -315,17 +315,32 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) dev_dbg(&port->dev, "%s - enabled reading\n", __func__); rc = klsi_105_get_line_state(port, &line_state); - if (rc >= 0) { - spin_lock_irqsave(&priv->lock, flags); - priv->line_state = line_state; - spin_unlock_irqrestore(&priv->lock, flags); - dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state); - retval = 0; - } else + if (rc < 0) { retval = rc; + goto err_disable_read; + } + + spin_lock_irqsave(&priv->lock, flags); + priv->line_state = line_state; + spin_unlock_irqrestore(&priv->lock, flags); + dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, + line_state); + + return 0; -exit: +err_disable_read: + usb_control_msg(port->serial->dev, + usb_sndctrlpipe(port->serial->dev, 0), + KL5KUSB105A_SIO_CONFIGURE, + USB_TYPE_VENDOR | USB_DIR_OUT, + KL5KUSB105A_SIO_CONFIGURE_READ_OFF, + 0, /* index */ + NULL, 0, + KLSI_TIMEOUT); + usb_serial_generic_close(port); +err_free_cfg: kfree(cfg); + return retval; } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index a599e8a841b0..248dac170f39 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -271,6 +271,8 @@ static void option_instat_callback(struct urb *urb); #define TELIT_PRODUCT_CC864_SINGLE 0x1006 #define TELIT_PRODUCT_DE910_DUAL 0x1010 #define TELIT_PRODUCT_UE910_V2 0x1012 +#define TELIT_PRODUCT_LE922_USBCFG1 0x1040 +#define TELIT_PRODUCT_LE922_USBCFG2 0x1041 #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 @@ -1222,6 +1224,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0), .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1), + .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), @@ -2001,6 +2007,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index ee71baddbb10..e227eb09b1a0 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct 
*vma) vma->vm_ops = &gntdev_vmops; - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO; + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP; if (use_ptemod) vma->vm_flags |= VM_DONTCOPY; diff --git a/fs/block_dev.c b/fs/block_dev.c index ccfd31f1df3a..300566ea9b74 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -729,7 +729,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, return true; /* already a holder */ else if (bdev->bd_holder != NULL) return false; /* held by someone else */ - else if (bdev->bd_contains == bdev) + else if (whole == bdev) return true; /* is a whole device which isn't held */ else if (whole->bd_holder == bd_may_claim) @@ -1772,6 +1772,7 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg) spin_lock(&inode_sb_list_lock); list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) { struct address_space *mapping = inode->i_mapping; + struct block_device *bdev; spin_lock(&inode->i_lock); if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) || @@ -1792,8 +1793,12 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg) */ iput(old_inode); old_inode = inode; + bdev = I_BDEV(inode); - func(I_BDEV(inode), arg); + mutex_lock(&bdev->bd_mutex); + if (bdev->bd_openers) + func(bdev, arg); + mutex_unlock(&bdev->bd_mutex); spin_lock(&inode_sb_list_lock); } diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 1848705506ff..0ce4de6430ef 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -64,6 +64,20 @@ void btrfs_##name(struct work_struct *arg) \ normal_work_helper(work); \ } +bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq) +{ + /* + * We could compare wq->normal->pending with num_online_cpus() + * to support "thresh == NO_THRESHOLD" case, but it requires + * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's + * postpone it until someone needs the support of that case. 
+ */ + if (wq->normal->thresh == NO_THRESHOLD) + return false; + + return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2; +} + BTRFS_WORK_HELPER(worker_helper); BTRFS_WORK_HELPER(delalloc_helper); BTRFS_WORK_HELPER(flush_delalloc_helper); diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h index ec2ee477f8ba..8c4564204f25 100644 --- a/fs/btrfs/async-thread.h +++ b/fs/btrfs/async-thread.h @@ -78,4 +78,5 @@ void btrfs_queue_work(struct btrfs_workqueue *wq, void btrfs_destroy_workqueue(struct btrfs_workqueue *wq); void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max); void btrfs_set_work_high_priority(struct btrfs_work *work); +bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq); #endif diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index bc2d048a9eb9..8265b0754dca 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1371,7 +1371,8 @@ release_path: total_done++; btrfs_release_prepared_delayed_node(delayed_node); - if (async_work->nr == 0 || total_done < async_work->nr) + if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) || + total_done < async_work->nr) goto again; free_path: @@ -1387,7 +1388,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root, { struct btrfs_async_delayed_work *async_work; - if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) + if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND || + btrfs_workqueue_normal_congested(fs_info->delayed_workers)) return 0; async_work = kmalloc(sizeof(*async_work), GFP_NOFS); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 32ecb95f6214..6ee954c62fe6 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1821,12 +1821,11 @@ static noinline int find_dir_range(struct btrfs_root *root, next: /* check the next slot in the tree to see if it is a valid item */ nritems = btrfs_header_nritems(path->nodes[0]); + path->slots[0]++; if (path->slots[0] >= nritems) { ret = btrfs_next_leaf(root, path); if (ret) goto out; - } else { - path->slots[0]++; } btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); @@ -4984,6 +4983,7 @@ process_leaf: if (di_key.type == BTRFS_ROOT_ITEM_KEY) continue; + btrfs_release_path(path); di_inode = btrfs_iget(root->fs_info->sb, &di_key, root, NULL); if (IS_ERR(di_inode)) { @@ -4993,13 +4993,12 @@ process_leaf: if (btrfs_inode_in_log(di_inode, trans->transid)) { iput(di_inode); - continue; + break; } ctx->log_new_dentries = false; if (type == BTRFS_FT_DIR) log_mode = LOG_INODE_ALL; - btrfs_release_path(path); ret = btrfs_log_inode(trans, root, di_inode, log_mode, 0, LLONG_MAX, ctx); iput(di_inode); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 22b289a3b1c4..9d0e4fef8ee1 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -615,6 +615,8 @@ struct TCP_Server_Info { #ifdef CONFIG_CIFS_SMB2 unsigned int max_read; unsigned int max_write; + struct delayed_work reconnect; /* reconnect workqueue job */ + struct mutex reconnect_mutex; /* prevent simultaneous reconnects */ #endif /* CONFIG_CIFS_SMB2 */ }; @@ -814,6 +816,7 @@ cap_unix(struct cifs_ses *ses) struct cifs_tcon { struct list_head tcon_list; int tc_count; + struct list_head rlist; /* reconnect list */ struct list_head openFileList; struct cifs_ses *ses; /* pointer to session associated with */ char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */ diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index f730c065df34..5ee60b50d8eb 100644 --- 
a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -207,6 +207,9 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink, struct cifs_pending_open *open); extern void cifs_del_pending_open(struct cifs_pending_open *open); +extern void cifs_put_tcp_session(struct TCP_Server_Info *server, + int from_reconnect); +extern void cifs_put_tcon(struct cifs_tcon *tcon); #if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) extern void cifs_dfs_release_automount_timer(void); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index cd9d50e4f5f4..7d7bd466520b 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -52,6 +52,9 @@ #include "nterr.h" #include "rfc1002pdu.h" #include "fscache.h" +#ifdef CONFIG_CIFS_SMB2 +#include "smb2proto.h" +#endif #define CIFS_PORT 445 #define RFC1001_PORT 139 @@ -2069,8 +2072,8 @@ cifs_find_tcp_session(struct smb_vol *vol) return NULL; } -static void -cifs_put_tcp_session(struct TCP_Server_Info *server) +void +cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect) { struct task_struct *task; @@ -2087,6 +2090,19 @@ cifs_put_tcp_session(struct TCP_Server_Info *server) cancel_delayed_work_sync(&server->echo); +#ifdef CONFIG_CIFS_SMB2 + if (from_reconnect) + /* + * Avoid deadlock here: reconnect work calls + * cifs_put_tcp_session() at its end. Need to be sure + * that reconnect work does nothing with server pointer after + * that step. + */ + cancel_delayed_work(&server->reconnect); + else + cancel_delayed_work_sync(&server->reconnect); +#endif + spin_lock(&GlobalMid_Lock); server->tcpStatus = CifsExiting; spin_unlock(&GlobalMid_Lock); @@ -2151,6 +2167,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info) INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); INIT_LIST_HEAD(&tcp_ses->smb_ses_list); INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); +#ifdef CONFIG_CIFS_SMB2 + INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server); + mutex_init(&tcp_ses->reconnect_mutex); +#endif memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr, sizeof(tcp_ses->srcaddr)); memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr, @@ -2303,7 +2323,7 @@ cifs_put_smb_ses(struct cifs_ses *ses) spin_unlock(&cifs_tcp_ses_lock); sesInfoFree(ses); - cifs_put_tcp_session(server); + cifs_put_tcp_session(server, 0); } #ifdef CONFIG_KEYS @@ -2476,7 +2496,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) mutex_unlock(&ses->session_mutex); /* existing SMB ses has a server reference already */ - cifs_put_tcp_session(server); + cifs_put_tcp_session(server, 0); free_xid(xid); return ses; } @@ -2566,7 +2586,7 @@ cifs_find_tcon(struct cifs_ses *ses, const char *unc) return NULL; } -static void +void cifs_put_tcon(struct cifs_tcon *tcon) { unsigned int xid; @@ -3673,7 +3693,7 @@ mount_fail_check: else if (ses) cifs_put_smb_ses(ses); else - cifs_put_tcp_session(server); + cifs_put_tcp_session(server, 0); bdi_destroy(&cifs_sb->bdi); } @@ -3984,7 +4004,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info); if (IS_ERR(ses)) { tcon = (struct cifs_tcon *)ses; - cifs_put_tcp_session(master_tcon->ses->server); + cifs_put_tcp_session(master_tcon->ses->server, 0); goto out; } diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index 2ab297dae5a7..1bdfd7c5309c 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -241,7 +241,7 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile) * and check it for zero before using. 
*/ max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf; - if (!max_buf) { + if (max_buf < sizeof(struct smb2_lock_element)) { free_xid(xid); return -EINVAL; } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 8f527c867f78..e3cf4a5fb35a 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -264,7 +264,7 @@ out: case SMB2_CHANGE_NOTIFY: case SMB2_QUERY_INFO: case SMB2_SET_INFO: - return -EAGAIN; + rc = -EAGAIN; } unload_nls(nls_codepage); return rc; @@ -1615,6 +1615,54 @@ smb2_echo_callback(struct mid_q_entry *mid) add_credits(server, credits_received, CIFS_ECHO_OP); } +void smb2_reconnect_server(struct work_struct *work) +{ + struct TCP_Server_Info *server = container_of(work, + struct TCP_Server_Info, reconnect.work); + struct cifs_ses *ses; + struct cifs_tcon *tcon, *tcon2; + struct list_head tmp_list; + int tcon_exist = false; + + /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ + mutex_lock(&server->reconnect_mutex); + + INIT_LIST_HEAD(&tmp_list); + cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n"); + + spin_lock(&cifs_tcp_ses_lock); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { + if (tcon->need_reconnect) { + tcon->tc_count++; + list_add_tail(&tcon->rlist, &tmp_list); + tcon_exist = true; + } + } + } + /* + * Get the reference to server struct to be sure that the last call of + * cifs_put_tcon() in the loop below won't release the server pointer. + */ + if (tcon_exist) + server->srv_count++; + + spin_unlock(&cifs_tcp_ses_lock); + + list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { + smb2_reconnect(SMB2_ECHO, tcon); + list_del_init(&tcon->rlist); + cifs_put_tcon(tcon); + } + + cifs_dbg(FYI, "Reconnecting tcons finished\n"); + mutex_unlock(&server->reconnect_mutex); + + /* now we can safely release srv struct */ + if (tcon_exist) + cifs_put_tcp_session(server, 1); +} + int SMB2_echo(struct TCP_Server_Info *server) { @@ -1627,32 +1675,11 @@ SMB2_echo(struct TCP_Server_Info *server) cifs_dbg(FYI, "In echo request\n"); if (server->tcpStatus == CifsNeedNegotiate) { - struct list_head *tmp, *tmp2; - struct cifs_ses *ses; - struct cifs_tcon *tcon; - - cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n"); - spin_lock(&cifs_tcp_ses_lock); - list_for_each(tmp, &server->smb_ses_list) { - ses = list_entry(tmp, struct cifs_ses, smb_ses_list); - list_for_each(tmp2, &ses->tcon_list) { - tcon = list_entry(tmp2, struct cifs_tcon, - tcon_list); - /* add check for persistent handle reconnect */ - if (tcon && tcon->need_reconnect) { - spin_unlock(&cifs_tcp_ses_lock); - rc = smb2_reconnect(SMB2_ECHO, tcon); - spin_lock(&cifs_tcp_ses_lock); - } - } - } - spin_unlock(&cifs_tcp_ses_lock); + /* No need to send echo on newly established connections */ + queue_delayed_work(cifsiod_wq, &server->reconnect, 0); + return rc; } - /* if no session, renegotiate failed above */ - if (server->tcpStatus == CifsNeedNegotiate) - return -EIO; - rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req); if (rc) return rc; diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 9bc59f9c12fb..0a406ae78129 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -95,6 +95,7 @@ extern int smb2_open_file(const unsigned int xid, extern int smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, const unsigned int xid); extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile); +extern void smb2_reconnect_server(struct work_struct *work); /* * SMB2 Worker functions - most of 
protocol specific implementation details diff --git a/fs/exec.c b/fs/exec.c index 1977c2a553ac..d392c8ad0de0 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -19,7 +19,7 @@ * current->executable is only used by the procfs. This allows a dispatch * table to check for several different types of binary formats. We keep * trying until we recognize the file or we run out of supported binary - * formats. + * formats. */ #include @@ -1108,6 +1108,13 @@ int flush_old_exec(struct linux_binprm * bprm) flush_thread(); current->personality &= ~bprm->per_clear; + /* + * We have to apply CLOEXEC before we change whether the process is + * dumpable (in setup_new_exec) to avoid a race with a process in userspace + * trying to access the should-be-closed file descriptors of a process + * undergoing exec(2). + */ + do_close_on_exec(current->files); return 0; out: @@ -1157,7 +1164,6 @@ void setup_new_exec(struct linux_binprm * bprm) group */ current->self_exec_id++; flush_signal_handlers(current, 0); - do_close_on_exec(current->files); } EXPORT_SYMBOL(setup_new_exec); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 145d6ba4117d..df67a6f8582a 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -224,6 +224,7 @@ struct ext4_io_submit { #define EXT4_MAX_BLOCK_SIZE 65536 #define EXT4_MIN_BLOCK_LOG_SIZE 10 #define EXT4_MAX_BLOCK_LOG_SIZE 16 +#define EXT4_MAX_CLUSTER_LOG_SIZE 30 #ifdef __KERNEL__ # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize) #else diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 095c7a258d97..d77d542c2ed5 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -336,8 +336,10 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode, len -= EXT4_MIN_INLINE_DATA_SIZE; value = kzalloc(len, GFP_NOFS); - if (!value) + if (!value) { + error = -ENOMEM; goto out; + } error = ext4_xattr_ibody_get(inode, i.name_index, i.name, value, len); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 5b58e266892b..e6798ca34928 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -668,7 +668,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb, ext4_grpblk_t min; ext4_grpblk_t max; ext4_grpblk_t chunk; - unsigned short border; + unsigned int border; BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); @@ -2254,7 +2254,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) struct ext4_group_info *grinfo; struct sg { struct ext4_group_info info; - ext4_grpblk_t counters[16]; + ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; } sg; group--; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 4723d8b02747..97aa8be40175 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3361,10 +3361,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp, ext4_set_bit(s++, buf); count++; } - for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) { - ext4_set_bit(EXT4_B2C(sbi, s++), buf); - count++; + j = ext4_bg_num_gdb(sb, grp); + if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) { + ext4_error(sb, "Invalid number of block group " + "descriptor blocks: %d", j); + j = EXT4_BLOCKS_PER_GROUP(sb) - s; } + count += j; + for (; j > 0; j--) + ext4_set_bit(EXT4_B2C(sbi, s++), buf); } if (!count) return 0; @@ -3736,7 +3741,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) if (blocksize < EXT4_MIN_BLOCK_SIZE || blocksize > EXT4_MAX_BLOCK_SIZE) { ext4_msg(sb, KERN_ERR, - "Unsupported filesystem blocksize %d", blocksize); + "Unsupported filesystem blocksize %d (%d log_block_size)", + blocksize, le32_to_cpu(es->s_log_block_size)); + goto failed_mount; + } + if 
(le32_to_cpu(es->s_log_block_size) > + (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { + ext4_msg(sb, KERN_ERR, + "Invalid log block size: %u", + le32_to_cpu(es->s_log_block_size)); goto failed_mount; } @@ -3832,12 +3845,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); - if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0) - goto cantfind_ext4; sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb); if (sbi->s_inodes_per_block == 0) goto cantfind_ext4; + if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || + sbi->s_inodes_per_group > blocksize * 8) { + ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n", + sbi->s_blocks_per_group); + goto failed_mount; + } sbi->s_itb_per_group = sbi->s_inodes_per_group / sbi->s_inodes_per_block; sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb); @@ -3878,6 +3895,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) "block size (%d)", clustersize, blocksize); goto failed_mount; } + if (le32_to_cpu(es->s_log_cluster_size) > + (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { + ext4_msg(sb, KERN_ERR, + "Invalid log cluster size: %u", + le32_to_cpu(es->s_log_cluster_size)); + goto failed_mount; + } sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - le32_to_cpu(es->s_log_block_size); sbi->s_clusters_per_group = @@ -3914,13 +3938,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } sbi->s_cluster_ratio = clustersize / blocksize; - if (sbi->s_inodes_per_group > blocksize * 8) { - ext4_msg(sb, KERN_ERR, - "#inodes per group too big: %lu", - sbi->s_inodes_per_group); - goto failed_mount; - } - /* Do we have standard group size of clustersize * 8 blocks ? */ if (sbi->s_blocks_per_group == clustersize << 3) set_opt2(sb, STD_GROUP_SIZE); diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index f5388f37217e..bb73d0a0f387 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -339,6 +339,7 @@ static int stat_open(struct inode *inode, struct file *file) } static const struct file_operations stat_fops = { + .owner = THIS_MODULE, .open = stat_open, .read = seq_read, .llseek = seq_lseek, diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 8b8d83a526ce..ddf5f9fd719f 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -414,7 +414,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping, */ if (!PageUptodate(page)) { unsigned pglen = nfs_page_length(page); - unsigned end = offset + len; + unsigned end = offset + copied; if (pglen == 0) { zero_user_segments(page, 0, offset, diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 1114afdd5a6b..4aefff89949d 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3864,6 +3864,7 @@ xlog_recover_clear_agi_bucket( agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); offset = offsetof(xfs_agi_t, agi_unlinked) + (sizeof(xfs_agino_t) * bucket); + xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF); xfs_trans_log_buf(tp, agibp, offset, (offset + sizeof(xfs_agino_t) - 1)); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ddd47c3a757d..c11f9d1963c3 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1940,8 +1940,8 @@ struct napi_gro_cb { /* This is non-zero if the packet may be of the same flow. 
*/ u8 same_flow:1; - /* Used in udp_gro_receive */ - u8 udp_mark:1; + /* Used in tunnel GRO receive */ + u8 encap_mark:1; /* GRO checksum is valid */ u8 csum_valid:1; diff --git a/include/net/ip.h b/include/net/ip.h index f41fc497b21b..117bde93995d 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -234,6 +234,8 @@ static inline int inet_is_local_reserved_port(struct net *net, int port) } #endif +__be32 inet_current_timestamp(void); + /* From inetpeer.c */ extern int inet_peer_threshold; extern int inet_peer_minttl; diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index ac54c27a2bfd..e6796dc8c764 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -183,10 +183,12 @@ static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr, dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); if (dev) { - ip4 = (struct in_device *)dev->ip_ptr; - if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) + ip4 = in_dev_get(dev); + if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) { ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address, (struct in6_addr *)gid); + in_dev_put(ip4); + } dev_put(dev); } } diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 0874e2edd275..79517e5549f1 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -598,11 +598,11 @@ return_normal: /* * Wait for the other CPUs to be notified and be waiting for us: */ - time_left = loops_per_jiffy * HZ; + time_left = MSEC_PER_SEC; while (kgdb_do_roundup && --time_left && (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) != online_cpus) - cpu_relax(); + udelay(1000); if (!time_left) pr_crit("Timed out waiting for secondary CPUs.\n"); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index d9f112bd42a7..d296b904685b 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -309,10 +309,10 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset; static inline u32 arch_gettimeoffset(void) { return 0; } #endif -static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr, +static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, cycle_t delta) { - s64 nsec; + u64 nsec; nsec = delta * tkr->mult + tkr->xtime_nsec; nsec >>= tkr->shift; diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index a51e79688455..972ce5b596f4 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -779,6 +779,10 @@ print_graph_entry_leaf(struct trace_iterator *iter, cpu_data = per_cpu_ptr(data->cpu_data, cpu); + /* If a graph tracer ignored set_graph_notrace */ + if (call->depth < -1) + call->depth += FTRACE_NOTRACE_DEPTH; + /* * Comments display at + 1 to depth. 
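The debug_core change above replaces an unbounded, loops_per_jiffy-scaled spin with roughly one second of one-millisecond polls. A hedged user-space approximation of that shape, with usleep() standing in for udelay() and a dummy all_cpus_in() predicate of our own:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int cpus_reported;

static bool all_cpus_in(void)
{
        return ++cpus_reported >= 3;    /* pretend the other CPUs check in */
}

int main(void)
{
        unsigned int time_left = 1000;  /* about MSEC_PER_SEC polls, ~1 s */

        while (--time_left && !all_cpus_in())
                usleep(1000);           /* user-space stand-in for udelay(1000) */

        if (!time_left)
                puts("Timed out waiting for secondary CPUs.");
        else
                puts("all CPUs in");
        return 0;
}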
Since * this is a leaf function, keep the comments @@ -787,7 +791,8 @@ print_graph_entry_leaf(struct trace_iterator *iter, cpu_data->depth = call->depth - 1; /* No need to keep this function around for this depth */ - if (call->depth < FTRACE_RETFUNC_DEPTH) + if (call->depth < FTRACE_RETFUNC_DEPTH && + !WARN_ON_ONCE(call->depth < 0)) cpu_data->enter_funcs[call->depth] = 0; } @@ -816,11 +821,16 @@ print_graph_entry_nested(struct trace_iterator *iter, struct fgraph_cpu_data *cpu_data; int cpu = iter->cpu; + /* If a graph tracer ignored set_graph_notrace */ + if (call->depth < -1) + call->depth += FTRACE_NOTRACE_DEPTH; + cpu_data = per_cpu_ptr(data->cpu_data, cpu); cpu_data->depth = call->depth; /* Save this function pointer to see if the exit matches */ - if (call->depth < FTRACE_RETFUNC_DEPTH) + if (call->depth < FTRACE_RETFUNC_DEPTH && + !WARN_ON_ONCE(call->depth < 0)) cpu_data->enter_funcs[call->depth] = call->func; } @@ -1048,7 +1058,8 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, */ cpu_data->depth = trace->depth - 1; - if (trace->depth < FTRACE_RETFUNC_DEPTH) { + if (trace->depth < FTRACE_RETFUNC_DEPTH && + !WARN_ON_ONCE(trace->depth < 0)) { if (cpu_data->enter_funcs[trace->depth] != trace->func) func_match = 0; cpu_data->enter_funcs[trace->depth] = 0; diff --git a/mm/vmscan.c b/mm/vmscan.c index e1a95dbcd5f8..f16e330e1096 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -246,6 +246,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, int nid = shrinkctl->nid; long batch_size = shrinker->batch ? shrinker->batch : SHRINK_BATCH; + long scanned = 0, next_deferred; freeable = shrinker->count_objects(shrinker, shrinkctl); if (freeable == 0) @@ -267,7 +268,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", shrinker->scan_objects, total_scan); total_scan = freeable; - } + next_deferred = nr; + } else + next_deferred = total_scan; /* * We need to avoid excessive windup on filesystem shrinkers @@ -324,17 +327,22 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, count_vm_events(SLABS_SCANNED, nr_to_scan); total_scan -= nr_to_scan; + scanned += nr_to_scan; cond_resched(); } + if (next_deferred >= scanned) + next_deferred -= scanned; + else + next_deferred = 0; /* * move the unused scan count back into the shrinker in a * manner that handles concurrent updates. If we exhausted the * scan, there is no need to do an update. */ - if (total_scan > 0) - new_nr = atomic_long_add_return(total_scan, + if (next_deferred > 0) + new_nr = atomic_long_add_return(next_deferred, &shrinker->nr_deferred[nid]); else new_nr = atomic_long_read(&shrinker->nr_deferred[nid]); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 84201c21705e..940ba74b297c 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2029,6 +2029,19 @@ static int process_connect(struct ceph_connection *con) dout("process_connect on %p tag %d\n", con, (int)con->in_tag); + if (con->auth_reply_buf) { + /* + * Any connection that defines ->get_authorizer() + * should also define ->verify_authorizer_reply(). + * See get_connect_authorizer(). 
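The vmscan change above stops charging unscanned work back to the shrinker twice: only what was actually scanned is subtracted from the deferred total, clamped at zero, and the remainder is what goes back into nr_deferred. A small sketch of that arithmetic with an invented name (settle_deferred), not mm code:

#include <stdio.h>

static long settle_deferred(long next_deferred, long scanned)
{
        if (next_deferred >= scanned)
                next_deferred -= scanned;
        else
                next_deferred = 0;
        return next_deferred;   /* what gets added back to nr_deferred[nid] */
}

int main(void)
{
        printf("%ld\n", settle_deferred(512, 128));     /* 384 carried over */
        printf("%ld\n", settle_deferred(100, 300));     /* clamped to 0     */
        return 0;
}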
+ */ + ret = con->ops->verify_authorizer_reply(con, 0); + if (ret < 0) { + con->error_msg = "bad authorize reply"; + return ret; + } + } + switch (con->in_reply.tag) { case CEPH_MSGR_TAG_FEATURES: pr_err("%s%lld %s feature set mismatch," diff --git a/net/core/dev.c b/net/core/dev.c index 56d820fc2707..0f9289ff0f2a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4059,8 +4059,8 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff NAPI_GRO_CB(skb)->same_flow = 0; NAPI_GRO_CB(skb)->flush = 0; NAPI_GRO_CB(skb)->free = 0; - NAPI_GRO_CB(skb)->udp_mark = 0; NAPI_GRO_CB(skb)->recursion_counter = 0; + NAPI_GRO_CB(skb)->encap_mark = 0; NAPI_GRO_CB(skb)->gro_remcsum_start = 0; /* Setup for GRO checksum validation */ diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 2095cd6c31fd..84e46837610b 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1388,6 +1388,45 @@ out: return pp; } +static struct sk_buff **ipip_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + if (NAPI_GRO_CB(skb)->encap_mark) { + NAPI_GRO_CB(skb)->flush = 1; + return NULL; + } + + NAPI_GRO_CB(skb)->encap_mark = 1; + + return inet_gro_receive(head, skb); +} + +#define SECONDS_PER_DAY 86400 + +/* inet_current_timestamp - Return IP network timestamp + * + * Return milliseconds since midnight in network byte order. + */ +__be32 inet_current_timestamp(void) +{ + u32 secs; + u32 msecs; + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + + /* Get secs since midnight. */ + (void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs); + /* Convert to msecs. */ + msecs = secs * MSEC_PER_SEC; + /* Convert nsec to msec. */ + msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC; + + /* Convert to network byte order. */ + return htonl(msecs); +} +EXPORT_SYMBOL(inet_current_timestamp); + int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) { if (sk->sk_family == AF_INET) @@ -1430,6 +1469,13 @@ out_unlock: return err; } +static int ipip_gro_complete(struct sk_buff *skb, int nhoff) +{ + skb->encapsulation = 1; + skb_shinfo(skb)->gso_type |= SKB_GSO_IPIP; + return inet_gro_complete(skb, nhoff); +} + int inet_ctl_sock_create(struct sock **sk, unsigned short family, unsigned short type, unsigned char protocol, struct net *net) @@ -1646,8 +1692,8 @@ static struct packet_offload ip_packet_offload __read_mostly = { static const struct net_offload ipip_offload = { .callbacks = { .gso_segment = inet_gso_segment, - .gro_receive = inet_gro_receive, - .gro_complete = inet_gro_complete, + .gro_receive = ipip_gro_receive, + .gro_complete = ipip_gro_complete, }, }; diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index b22a75c0a3d9..7841b35e5ab0 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -182,6 +182,14 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head, u8 proto = NAPI_GRO_CB(skb)->proto; const struct net_offload **offloads; + /* We can clear the encap_mark for FOU as we are essentially doing + * one of two possible things. We are either adding an L4 tunnel + * header to the outer L3 tunnel header, or we are simply + * treating the GRE tunnel header as though it is a UDP protocol + * specific header such as VXLAN or GENEVE. + */ + NAPI_GRO_CB(skb)->encap_mark = 0; + rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ?
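inet_current_timestamp() above centralises the ICMP/IP-option timestamp calculation: milliseconds since midnight UTC, returned big-endian. A user-space equivalent for illustration; current_ip_timestamp() and the direct use of clock_gettime() are our own naming, not kernel API:

#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <arpa/inet.h>

#define SECS_PER_DAY 86400

/* Sketch: milliseconds since midnight UTC, big-endian, as the IP timestamp
 * option and ICMP TIMESTAMP replies expect. */
static uint32_t current_ip_timestamp(void)
{
        struct timespec ts;
        uint32_t secs, msecs;

        clock_gettime(CLOCK_REALTIME, &ts);
        secs  = (uint32_t)(ts.tv_sec % SECS_PER_DAY);
        msecs = secs * 1000u + (uint32_t)(ts.tv_nsec / 1000000L);

        return htonl(msecs);            /* network byte order */
}

int main(void)
{
        printf("0x%08x\n", (unsigned)current_ip_timestamp());
        return 0;
}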
inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); @@ -349,6 +357,14 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, } } + /* We can clear the encap_mark for GUE as we are essentially doing + * one of two possible things. We are either adding an L4 tunnel + * header to the outer L3 tunnel header, or we are are simply + * treating the GRE tunnel header as though it is a UDP protocol + * specific header such as VXLAN or GENEVE. + */ + NAPI_GRO_CB(skb)->encap_mark = 0; + rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[guehdr->proto_ctype]); diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 53300b88d569..79ae0d7becbf 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -128,6 +128,11 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, struct packet_offload *ptype; __be16 type; + if (NAPI_GRO_CB(skb)->encap_mark) + goto out; + + NAPI_GRO_CB(skb)->encap_mark = 1; + off = skb_gro_offset(skb); hlen = off + sizeof(*greh); greh = skb_gro_header_fast(skb, off); diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index f5203fba6236..2ba975272ff6 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -907,7 +907,6 @@ static bool icmp_echo(struct sk_buff *skb) */ static bool icmp_timestamp(struct sk_buff *skb) { - struct timespec tv; struct icmp_bxm icmp_param; /* * Too short. @@ -918,9 +917,7 @@ static bool icmp_timestamp(struct sk_buff *skb) /* * Fill in the current time as ms since midnight UT: */ - getnstimeofday(&tv); - icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + - tv.tv_nsec / NSEC_PER_MSEC); + icmp_param.data.times[1] = inet_current_timestamp(); icmp_param.data.times[2] = icmp_param.data.times[1]; if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4)) BUG(); diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index bd246792360b..4d158ff1def1 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -58,10 +58,9 @@ void ip_options_build(struct sk_buff *skb, struct ip_options *opt, if (opt->ts_needaddr) ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, skb, rt); if (opt->ts_needtime) { - struct timespec tv; __be32 midtime; - getnstimeofday(&tv); - midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC); + + midtime = inet_current_timestamp(); memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4); } return; @@ -415,11 +414,10 @@ int ip_options_compile(struct net *net, break; } if (timeptr) { - struct timespec tv; - u32 midtime; - getnstimeofday(&tv); - midtime = (tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC; - put_unaligned_be32(midtime, timeptr); + __be32 midtime; + + midtime = inet_current_timestamp(); + memcpy(timeptr, &midtime, 4); opt->is_changed = 1; } } else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) { diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 2af7b7e1a0f6..dfcab88c3e74 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -299,14 +299,14 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, unsigned int off = skb_gro_offset(skb); int flush = 1; - if (NAPI_GRO_CB(skb)->udp_mark || + if (NAPI_GRO_CB(skb)->encap_mark || (skb->ip_summed != CHECKSUM_PARTIAL && NAPI_GRO_CB(skb)->csum_cnt == 0 && !NAPI_GRO_CB(skb)->csum_valid)) goto out; - /* mark that this skb passed once through the udp gro layer */ - NAPI_GRO_CB(skb)->udp_mark = 1; + /* mark that this skb passed once through the tunnel gro layer */ + 
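The ipip, sit, gre and udp receive paths above all share the same one-level-of-encapsulation gate on the new encap_mark bit: the first tunnel layer claims the mark, a second nested layer flushes the packet out of GRO (FOU/GUE deliberately clear the mark again). A minimal sketch of that gate with invented names (gro_cb, enter_encap_once), not the kernel structures:

#include <stdio.h>

struct gro_cb {
        unsigned char encap_mark:1;     /* set once one tunnel layer is entered */
        unsigned char flush:1;          /* tell GRO to give up on the packet    */
};

/* Sketch of the gate: the first encapsulation claims the mark, a nested one
 * forces a flush instead of recursing. */
static int enter_encap_once(struct gro_cb *cb)
{
        if (cb->encap_mark) {
                cb->flush = 1;
                return 0;
        }
        cb->encap_mark = 1;
        return 1;
}

int main(void)
{
        struct gro_cb cb = { 0 };
        int outer = enter_encap_once(&cb);      /* 1: outer tunnel accepted   */
        int inner = enter_encap_once(&cb);      /* 0: nested tunnel -> flush  */

        printf("outer=%d inner=%d flush=%d\n", outer, inner, cb.flush);
        return 0;
}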
NAPI_GRO_CB(skb)->encap_mark = 1; rcu_read_lock(); uo_priv = rcu_dereference(udp_offload_base); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index db0b8428d248..9b01da54d475 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -258,6 +258,19 @@ out: return pp; } +static struct sk_buff **sit_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + if (NAPI_GRO_CB(skb)->encap_mark) { + NAPI_GRO_CB(skb)->flush = 1; + return NULL; + } + + NAPI_GRO_CB(skb)->encap_mark = 1; + + return ipv6_gro_receive(head, skb); +} + static int ipv6_gro_complete(struct sk_buff *skb, int nhoff) { const struct net_offload *ops; diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index dace13d7638e..b7569238a410 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -539,9 +539,13 @@ gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred) return gss_new; gss_msg = gss_add_msg(gss_new); if (gss_msg == gss_new) { - int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg); + int res; + atomic_inc(&gss_msg->count); + res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg); if (res) { gss_unhash_msg(gss_new); + atomic_dec(&gss_msg->count); + gss_release_msg(gss_new); gss_msg = ERR_PTR(res); } } else @@ -834,6 +838,7 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg) warn_gssd(); gss_release_msg(gss_msg); } + gss_release_msg(gss_msg); } static void gss_pipe_dentry_destroy(struct dentry *dir, diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c index 8275f0e55106..4b2f44c20caf 100644 --- a/scripts/kconfig/nconf.gui.c +++ b/scripts/kconfig/nconf.gui.c @@ -364,12 +364,14 @@ int dialog_inputbox(WINDOW *main_window, WINDOW *prompt_win; WINDOW *form_win; PANEL *panel; - int i, x, y; + int i, x, y, lines, columns, win_lines, win_cols; int res = -1; int cursor_position = strlen(init); int cursor_form_win; char *result = *resultp; + getmaxyx(stdscr, lines, columns); + if (strlen(init)+1 > *result_len) { *result_len = strlen(init)+1; *resultp = result = realloc(result, *result_len); @@ -386,14 +388,19 @@ int dialog_inputbox(WINDOW *main_window, if (title) prompt_width = max(prompt_width, strlen(title)); + win_lines = min(prompt_lines+6, lines-2); + win_cols = min(prompt_width+7, columns-2); + prompt_lines = max(win_lines-6, 0); + prompt_width = max(win_cols-7, 0); + /* place dialog in middle of screen */ - y = (getmaxy(stdscr)-(prompt_lines+4))/2; - x = (getmaxx(stdscr)-(prompt_width+4))/2; + y = (lines-win_lines)/2; + x = (columns-win_cols)/2; strncpy(result, init, *result_len); /* create the windows */ - win = newwin(prompt_lines+6, prompt_width+7, y, x); + win = newwin(win_lines, win_cols, y, x); prompt_win = derwin(win, prompt_lines+1, prompt_width, 2, 2); form_win = derwin(win, 1, prompt_width, prompt_lines+3, 2); keypad(form_win, TRUE); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2a9ec9706db8..b93458698335 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5876,6 +5876,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0x90a60180}, {0x14, 0x90170120}, {0x21, 0x02211030}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x1b, 0x01011020}, + {0x21, 0x02211010}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, ALC255_STANDARD_PINS, {0x12, 0x90a60160}, diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c 
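The nconf change above first clamps the dialog window to the visible terminal, then re-derives the prompt area from the clamped window and centres it, so a huge prompt can no longer push the window off screen. The arithmetic extracted into a stand-alone example; the sample terminal size and the mini min/max macros are ours:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        int lines = 24, columns = 80;           /* as from getmaxyx(stdscr, ...) */
        int prompt_lines = 40, prompt_width = 60;       /* oversized prompt */

        int win_lines = min(prompt_lines + 6, lines - 2);
        int win_cols  = min(prompt_width + 7, columns - 2);

        prompt_lines = max(win_lines - 6, 0);   /* shrink prompt to fit window */
        prompt_width = max(win_cols - 7, 0);

        int y = (lines - win_lines) / 2;        /* centre the clamped window */
        int x = (columns - win_cols) / 2;

        printf("win %dx%d at (%d,%d), prompt %dx%d\n",
               win_lines, win_cols, y, x, prompt_lines, prompt_width);
        return 0;
}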
b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index 2fbaf2c75d17..5f43e1ced179 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -739,6 +739,9 @@ static int sst_soc_prepare(struct device *dev) struct sst_data *drv = dev_get_drvdata(dev); int i; + if (!drv->soc_card) + return 0; + /* suspend all pcms first */ snd_soc_suspend(drv->soc_card->dev); snd_soc_poweroff(drv->soc_card->dev); @@ -761,6 +764,9 @@ static void sst_soc_complete(struct device *dev) struct sst_data *drv = dev_get_drvdata(dev); int i; + if (!drv->soc_card) + return; + /* restart SSPs */ for (i = 0; i < drv->soc_card->num_rtd; i++) { struct snd_soc_dai *dai = drv->soc_card->rtd[i].cpu_dai; diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c index 2c44139b4041..33db205dd12b 100644 --- a/sound/usb/hiface/pcm.c +++ b/sound/usb/hiface/pcm.c @@ -445,6 +445,8 @@ static int hiface_pcm_prepare(struct snd_pcm_substream *alsa_sub) mutex_lock(&rt->stream_mutex); + hiface_pcm_stream_stop(rt); + sub->dma_off = 0; sub->period_off = 0; diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index e0fc02763024..4a033cbbd361 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -925,9 +925,10 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */ case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */ case USB_ID(0x046d, 0x0991): + case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */ /* Most audio usb devices lie about volume resolution. * Most Logitech webcams have res = 384. - * Proboly there is some logitech magic behind this number --fishor + * Probably there is some logitech magic behind this number --fishor */ if (!strcmp(kctl->id.name, "Mic Capture Volume")) { usb_audio_info(chip,