diff --git a/Makefile b/Makefile
index 2ef20781ad25..633b5f0f11a0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 19
-SUBLEVEL = 4
+SUBLEVEL = 5
 EXTRAVERSION =
 NAME = Diseased Newt
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index cb3142a2d40b..a86d567f6c70 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
 	       sigset_t *set)
 {
 	int err;
-	err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+	err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
 			     sizeof(sf->uc.uc_mcontext.regs.scratch));
 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	if (!err)
 		set_current_blocked(&set);
-	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
 				sizeof(sf->uc.uc_mcontext.regs.scratch));
 	return err;
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	/* Don't restart from sigreturn */
 	syscall_wont_restart(regs);
+	/*
+	 * Ensure that sigreturn always returns to user mode (in case the
+	 * regs saved on user stack got fudged between save and sigreturn)
+	 * Otherwise it is easy to panic the kernel with a custom
+	 * signal handler and/or restorer which clobberes the status32/ret
+	 * to return to a bogus location in kernel mode.
+	 */
+	regs->status32 |= STATUS_U_MASK;
+
 	return regs->r0;
 badframe:
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 	/*
 	 * handler returns using sigreturn stub provided already by userpsace
+	 * If not, nuke the process right away
 	 */
-	BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
+	if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
+		return 1;
+
 	regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
 	/* User Stack for signal handler will be above the frame just carved */
@@ -296,12 +308,12 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
 	sigset_t *oldset = sigmask_to_save();
-	int ret;
+	int failed;
 	/* Set up the stack frame */
-	ret = setup_rt_frame(ksig, oldset, regs);
+	failed = setup_rt_frame(ksig, oldset, regs);
-	signal_setup_done(ret, ksig, 0);
+	signal_setup_done(failed, ksig, 0);
 }
 void do_signal(struct pt_regs *regs)
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index a77604fbaf25..81502b90dd91 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -1,10 +1,12 @@
 menuconfig ARCH_SUNXI
 	bool "Allwinner SoCs" if ARCH_MULTI_V7
 	select ARCH_REQUIRE_GPIOLIB
+	select ARCH_HAS_RESET_CONTROLLER
 	select CLKSRC_MMIO
 	select GENERIC_IRQ_CHIP
 	select PINCTRL
 	select SUN4I_TIMER
+	select RESET_CONTROLLER
 if ARCH_SUNXI
@@ -20,10 +22,8 @@ config MACH_SUN5I
 config MACH_SUN6I
 	bool "Allwinner A31 (sun6i) SoCs support"
 	default ARCH_SUNXI
-	select ARCH_HAS_RESET_CONTROLLER
 	select ARM_GIC
 	select MFD_SUN6I_PRCM
-	select RESET_CONTROLLER
 	select SUN5I_HSTIMER
 config MACH_SUN7I
@@ -37,16 +37,12 @@ config MACH_SUN7I
 config MACH_SUN8I
 	bool "Allwinner A23 (sun8i) SoCs support"
 	default ARCH_SUNXI
-	select ARCH_HAS_RESET_CONTROLLER
 	select ARM_GIC
 	select MFD_SUN6I_PRCM
-	select RESET_CONTROLLER
 config MACH_SUN9I
 	bool "Allwinner (sun9i) SoCs support"
 	default ARCH_SUNXI
-	select ARCH_HAS_RESET_CONTROLLER
 	select ARM_GIC
-	select RESET_CONTROLLER
 endif
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index 2bf8e9307be9..4c8ad592ae33 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
 static inline int cpu_nr_cores(void)
 {
-	return NR_CPUS >> threads_shift;
+	return nr_cpu_ids >> threads_shift;
 }
 static inline cpumask_t cpu_online_cores_map(void)
diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
new file mode 100644
index 000000000000..744fd54de374
--- /dev/null
+++ b/arch/powerpc/include/asm/irq_work.h
@@ -0,0 +1,9 @@
+#ifndef _ASM_POWERPC_IRQ_WORK_H
+#define _ASM_POWERPC_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+	return true;
+}
+
+#endif /* _ASM_POWERPC_IRQ_WORK_H */
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 9ce5afe167ff..b36365f49478 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -639,10 +639,7 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
 			(unsigned long long)r->end,
 			(unsigned int)r->flags);
-		if (pci_claim_resource(dev, i) == 0)
-			continue;
-
-		pci_claim_bridge_resource(dev, i);
+		pci_claim_resource(dev, i);
 	}
 }
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index bae6c609888e..86db4bcd7ce5 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 		},
 	},
+	/* ASRock */
+	{	/* Handle problems with rebooting on ASRock Q1900DC-ITX */
+		.callback = set_pci_reboot,
+		.ident = "ASRock Q1900DC-ITX",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
+			DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
+		},
+	},
+
 	/* ASUS */
 	{	/* Handle problems with rebooting on ASUS P4S800 */
 		.callback = set_bios_reboot,
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index cc45a9f1a6e6..0537a0911c96 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -93,6 +93,12 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
 unsigned long xen_max_p2m_pfn __read_mostly;
 EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#else
+#define P2M_LIMIT 0
+#endif
+
 static DEFINE_SPINLOCK(p2m_update_lock);
 static unsigned long *p2m_mid_missing_mfn;
@@ -387,9 +393,11 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
 void __init xen_vmalloc_p2m_tree(void)
 {
 	static struct vm_struct vm;
+	unsigned long p2m_limit;
+
+	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
 	vm.flags = VM_ALLOC;
-	vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
+	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
 			PMD_SIZE * PMDS_PER_MID_PAGE);
 	vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
 	pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 89b97b5e0881..2be75ff7f171 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -609,7 +609,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
 		struct bio_vec *bprev;
-		bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
+		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
 		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
 			return false;
 	}
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 3a415ecfe3d4..e6b6283cb5e8 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -265,9 +265,11 @@ static int bt_get(struct blk_mq_alloc_data *data,
 		/*
 		 * We're out of tags on this hardware queue, kick any
 		 * pending IO submits before going to sleep waiting for
-		 * some to complete.
+		 * some to complete. Note that hctx can be NULL here for
+		 * reserved tag allocation.
 		 */
-		blk_mq_run_hw_queue(hctx, false);
+		if (hctx)
+			blk_mq_run_hw_queue(hctx, false);
 		/*
 		 * Retry tag allocation after running the hardware queue,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2390c5541e71..447f5336960a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1937,7 +1937,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	 */
 	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
 			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-		goto err_map;
+		goto err_mq_usage;
 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
 	blk_queue_rq_timeout(q, 30000);
@@ -1980,7 +1980,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 	if (blk_mq_init_hw_queues(q, set))
-		goto err_hw;
+		goto err_mq_usage;
 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);
@@ -1992,7 +1992,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	return q;
-err_hw:
+err_mq_usage:
 	blk_cleanup_queue(q);
 err_hctxs:
 	kfree(map);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 6ed2cbe5e8c9..12600bfffca9 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 					b->physical_block_size);
 	t->io_min = max(t->io_min, b->io_min);
-	t->io_opt = lcm(t->io_opt, b->io_opt);
+	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 	t->cluster &= b->cluster;
 	t->discard_zeroes_data &= b->discard_zeroes_data;
@@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			b->raid_partial_stripes_expensive);
 	/* Find lowest common alignment_offset */
-	t->alignment_offset = lcm(t->alignment_offset, alignment)
+	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
 		% max(t->physical_block_size, t->io_min);
 	/* Verify that new alignment_offset is on a logical block boundary */
@@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 						      b->max_discard_sectors);
 		t->discard_granularity = max(t->discard_granularity,
 					     b->discard_granularity);
-		t->discard_alignment = lcm(t->discard_alignment, alignment) %
+		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
 			t->discard_granularity;
 	}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 87b704e41877..b27ab7a0741d 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -962,7 +962,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
 		return -EINVAL;
 	drv->safe_state_index = -1;
-	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+	for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
 		drv->states[i].name[0] = '\0';
 		drv->states[i].desc[0] = '\0';
 	}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index d1a05f9bb91f..00f2f740b425 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4233,9 +4233,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
 	/* devices that don't properly handle queued TRIM commands */
-	{ "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+	{ "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+		ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{
"Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, - { "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, /* * As defined, the DRAT (Deterministic Read After Trim) and RZAT @@ -4255,6 +4264,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { */ { "INTEL*SSDSC2MH*", NULL, 0, }, + { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 4bc2a5cb9935..a98c41f72c63 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -803,10 +803,6 @@ static int __init nbd_init(void) return -EINVAL; } - nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); - if (!nbd_dev) - return -ENOMEM; - part_shift = 0; if (max_part > 0) { part_shift = fls(max_part); @@ -828,6 +824,10 @@ static int __init nbd_init(void) if (nbds_max > 1UL << (MINORBITS - part_shift)) return -EINVAL; + nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); + if (!nbd_dev) + return -ENOMEM; + for (i = 0; i < nbds_max; i++) { struct gendisk *disk = alloc_disk(1 << part_shift); if (!disk) diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index c0a842b335c5..a52154caf526 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -129,12 +129,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, return DIV_ROUND_UP(parent_rate, div); } -/* - * The reverse of DIV_ROUND_UP: The maximum number which - * divided by m is r - */ -#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1) - static bool _is_valid_table_div(const struct clk_div_table *table, unsigned int div) { @@ -208,6 +202,7 @@ static int _div_round_closest(struct clk_divider *divider, unsigned long parent_rate, unsigned long rate) { int up, down, div; + unsigned long up_rate, down_rate; up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate); @@ -219,7 +214,10 @@ static int _div_round_closest(struct clk_divider *divider, down = _round_down_table(divider->table, div); } - return (up - div) <= (div - down) ? up : down; + up_rate = DIV_ROUND_UP(parent_rate, up); + down_rate = DIV_ROUND_UP(parent_rate, down); + + return (rate - up_rate) <= (down_rate - rate) ? up : down; } static int _div_round(struct clk_divider *divider, unsigned long parent_rate, @@ -300,7 +298,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, return i; } parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), - MULT_ROUND_UP(rate, i)); + rate * i); now = DIV_ROUND_UP(parent_rate, i); if (_is_best_div(divider, rate, now, best)) { bestdiv = i; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 7030c409be24..24736bb88cb2 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1724,15 +1724,18 @@ void cpufreq_resume(void) || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS)) pr_err("%s: Failed to start governor for policy: %p\n", __func__, policy); - - /* - * schedule call cpufreq_update_policy() for boot CPU, i.e. 
last - * policy in list. It will verify that the current freq is in - * sync with what we believe it to be. - */ - if (list_is_last(&policy->policy_list, &cpufreq_policy_list)) - schedule_work(&policy->update); } + + /* + * schedule call cpufreq_update_policy() for first-online CPU, as that + * wouldn't be hotplugged-out on suspend. It will verify that the + * current freq is in sync with what we believe it to be. + */ + policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask)); + if (WARN_ON(!policy)) + return; + + schedule_work(&policy->update); } /** diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 125150dc6e81..9ab99642ca7a 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -297,9 +297,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev) if (!dev->registered) return -EINVAL; - if (!dev->state_count) - dev->state_count = drv->state_count; - ret = cpuidle_add_device_sysfs(dev); if (ret) return ret; diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 97c5903b4606..832a2c3f01ff 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -401,7 +401,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device) struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device); /* state statistics */ - for (i = 0; i < device->state_count; i++) { + for (i = 0; i < drv->state_count; i++) { kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); if (!kobj) goto error_state; @@ -433,9 +433,10 @@ error_state: */ static void cpuidle_remove_state_sysfs(struct cpuidle_device *device) { + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device); int i; - for (i = 0; i < device->state_count; i++) + for (i = 0; i < drv->state_count; i++) cpuidle_free_state_kobj(device, i); } diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index b969206439b7..b2ce6e10970f 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -258,6 +258,13 @@ static int edma_terminate_all(struct edma_chan *echan) */ if (echan->edesc) { int cyclic = echan->edesc->cyclic; + + /* + * free the running request descriptor + * since it is not in any of the vdesc lists + */ + edma_desc_free(&echan->edesc->vdesc); + echan->edesc = NULL; edma_stop(echan->ch_num); /* Move the cyclic channel back to default queue */ diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index c0016a68b446..d35275dbed7e 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -978,6 +978,7 @@ static int omap_dma_terminate_all(struct omap_chan *c) * c->desc is NULL and exit.) */ if (c->desc) { + omap_dma_desc_free(&c->desc->vd); c->desc = NULL; /* Avoid stopping the dma twice */ if (!c->paused) diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 69fac068669f..2eebd28b4c40 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -86,10 +86,13 @@ static void dmi_table(u8 *buf, u32 len, int num, int i = 0; /* - * Stop when we see all the items the table claimed to have - * OR we run off the end of the table (also happens) + * Stop when we have seen all the items the table claimed to have + * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run + * off the end of the table (should never happen but sometimes does + * on bogus implementations.) 
*/ - while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) { + while ((!num || i < num) && + (data - buf + sizeof(struct dmi_header)) <= len) { const struct dmi_header *dm = (const struct dmi_header *)data; /* @@ -529,21 +532,10 @@ static int __init dmi_smbios3_present(const u8 *buf) if (memcmp(buf, "_SM3_", 5) == 0 && buf[6] < 32 && dmi_checksum(buf, buf[6])) { dmi_ver = get_unaligned_be16(buf + 7); + dmi_num = 0; /* No longer specified */ dmi_len = get_unaligned_le32(buf + 12); dmi_base = get_unaligned_le64(buf + 16); - /* - * The 64-bit SMBIOS 3.0 entry point no longer has a field - * containing the number of structures present in the table. - * Instead, it defines the table size as a maximum size, and - * relies on the end-of-table structure type (#127) to be used - * to signal the end of the table. - * So let's define dmi_num as an upper bound as well: each - * structure has a 4 byte header, so dmi_len / 4 is an upper - * bound for the number of structures in the table. - */ - dmi_num = dmi_len / 4; - if (dmi_walk_early(dmi_decode) == 0) { pr_info("SMBIOS %d.%d present.\n", dmi_ver >> 8, dmi_ver & 0xFF); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 0fd592799d58..c80a8a31890d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -56,9 +56,9 @@ static inline unsigned int get_pipes_num(struct device_queue_manager *dqm) return dqm->dev->shared_resources.compute_pipe_count; } -static inline unsigned int get_first_pipe(struct device_queue_manager *dqm) +unsigned int get_first_pipe(struct device_queue_manager *dqm) { - BUG_ON(!dqm); + BUG_ON(!dqm || !dqm->dev); return dqm->dev->shared_resources.first_compute_pipe; } @@ -693,7 +693,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm) INIT_LIST_HEAD(&dqm->queues); dqm->queue_count = dqm->processes_count = 0; dqm->active_runlist = false; - retval = init_pipelines(dqm, get_pipes_num(dqm), 0); + retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); if (retval != 0) goto fail_init_pipelines; diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 29168fae3dcb..27a37e5f274d 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -531,17 +531,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb) } EXPORT_SYMBOL(drm_framebuffer_reference); -static void drm_framebuffer_free_bug(struct kref *kref) -{ - BUG(); -} - -static void __drm_framebuffer_unreference(struct drm_framebuffer *fb) -{ - DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount)); - kref_put(&fb->refcount, drm_framebuffer_free_bug); -} - /** * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr * @fb: fb to unregister @@ -1322,7 +1311,7 @@ void drm_plane_force_disable(struct drm_plane *plane) return; } /* disconnect the plane from the fb and crtc: */ - __drm_framebuffer_unreference(plane->old_fb); + drm_framebuffer_unreference(plane->old_fb); plane->old_fb = NULL; plane->fb = NULL; plane->crtc = NULL; diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 732cb6f8e653..4c0aa97aaf03 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector) drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); + 
drm_edid_to_eld(connector, edid); kfree(edid); return ret; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 7483a47de8e4..9bca854737f0 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -152,6 +152,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; count = drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); } else count = (*connector_funcs->get_modes)(connector); } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 4e6405e7226f..391d47e1f131 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1093,6 +1093,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) /* Gunit-Display CZ domain, 0x182028-0x1821CF */ s->gu_ctl0 = I915_READ(VLV_GU_CTL0); s->gu_ctl1 = I915_READ(VLV_GU_CTL1); + s->pcbr = I915_READ(VLV_PCBR); s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); /* @@ -1187,6 +1188,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) /* Gunit-Display CZ domain, 0x182028-0x1821CF */ I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); + I915_WRITE(VLV_PCBR, s->pcbr); I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); } @@ -1195,19 +1197,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) u32 val; int err; - val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); - WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on); - #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT) - /* Wait for a previous force-off to settle */ - if (force_on) { - err = wait_for(!COND, 20); - if (err) { - DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n", - I915_READ(VLV_GTLC_SURVIVABILITY_REG)); - return err; - } - } val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); val &= ~VLV_GFX_CLK_FORCE_ON_BIT; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ddd005ce3a94..44a3c385eea2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -985,6 +985,7 @@ struct vlv_s0ix_state { /* Display 2 CZ domain */ u32 gu_ctl0; u32 gu_ctl1; + u32 pcbr; u32 clock_gate_dis2; }; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index a2045848bd1a..f75bf292285d 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -485,10 +485,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, stolen_offset, gtt_offset, size); /* KISS and expect everything to be page-aligned */ - BUG_ON(stolen_offset & 4095); - BUG_ON(size & 4095); - - if (WARN_ON(size == 0)) + if (WARN_ON(size == 0 || stolen_offset & 4095 || size & 4095)) return NULL; stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c10b52ef116d..791b00e612d8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2339,13 +2339,19 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_gem_object *obj = NULL; struct drm_mode_fb_cmd2 mode_cmd = { 0 }; - u32 base = plane_config->base; + u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); + u32 size_aligned = round_up(plane_config->base + plane_config->size, + PAGE_SIZE); + + 
size_aligned -= base_aligned; if (plane_config->size == 0) return false; - obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base, - plane_config->size); + obj = i915_gem_object_create_stolen_for_preallocated(dev, + base_aligned, + base_aligned, + size_aligned); if (!obj) return false; @@ -4366,15 +4372,15 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config.has_pch_encoder) ironlake_pch_enable(crtc); + assert_vblank_disabled(crtc); + drm_crtc_vblank_on(crtc); + for_each_encoder_on_crtc(dev, crtc, encoder) encoder->enable(encoder); if (HAS_PCH_CPT(dev)) cpt_verify_modeset(dev, intel_crtc->pipe); - assert_vblank_disabled(crtc); - drm_crtc_vblank_on(crtc); - intel_crtc_enable_planes(crtc); } @@ -4486,14 +4492,14 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config.dp_encoder_is_mst) intel_ddi_set_vc_payload_alloc(crtc, true); + assert_vblank_disabled(crtc); + drm_crtc_vblank_on(crtc); + for_each_encoder_on_crtc(dev, crtc, encoder) { encoder->enable(encoder); intel_opregion_notify_encoder(encoder, true); } - assert_vblank_disabled(crtc); - drm_crtc_vblank_on(crtc); - /* If we change the relative order between pipe/planes enabling, we need * to change the workaround. */ haswell_mode_set_planes_workaround(intel_crtc); @@ -4544,12 +4550,12 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) intel_crtc_disable_planes(crtc); - drm_crtc_vblank_off(crtc); - assert_vblank_disabled(crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) encoder->disable(encoder); + drm_crtc_vblank_off(crtc); + assert_vblank_disabled(crtc); + if (intel_crtc->config.has_pch_encoder) intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); @@ -4608,14 +4614,14 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) intel_crtc_disable_planes(crtc); - drm_crtc_vblank_off(crtc); - assert_vblank_disabled(crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) { intel_opregion_notify_encoder(encoder, false); encoder->disable(encoder); } + drm_crtc_vblank_off(crtc); + assert_vblank_disabled(crtc); + if (intel_crtc->config.has_pch_encoder) intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, false); @@ -5083,12 +5089,12 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) intel_update_watermarks(crtc); intel_enable_pipe(intel_crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->enable(encoder); - assert_vblank_disabled(crtc); drm_crtc_vblank_on(crtc); + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->enable(encoder); + intel_crtc_enable_planes(crtc); /* Underruns don't raise interrupts, so check manually. 
*/ @@ -5144,12 +5150,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) intel_update_watermarks(crtc); intel_enable_pipe(intel_crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->enable(encoder); - assert_vblank_disabled(crtc); drm_crtc_vblank_on(crtc); + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->enable(encoder); + intel_crtc_enable_planes(crtc); /* @@ -5221,12 +5227,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) */ intel_wait_for_vblank(dev, pipe); - drm_crtc_vblank_off(crtc); - assert_vblank_disabled(crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) encoder->disable(encoder); + drm_crtc_vblank_off(crtc); + assert_vblank_disabled(crtc); + intel_disable_pipe(intel_crtc); i9xx_pfit_disable(intel_crtc); @@ -6660,8 +6666,7 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc, aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, plane_config->tiled); - plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] * - aligned_height); + plane_config->size = crtc->base.primary->fb->pitches[0] * aligned_height; DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe, plane, crtc->base.primary->fb->width, @@ -7711,8 +7716,7 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc, aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, plane_config->tiled); - plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] * - aligned_height); + plane_config->size = crtc->base.primary->fb->pitches[0] * aligned_height; DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe, plane, crtc->base.primary->fb->width, diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 7d9c340f7693..2d28c3377ff8 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -1495,7 +1495,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, drm_modeset_lock_all(dev); plane = drm_plane_find(dev, set->plane_id); - if (!plane) { + if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) { ret = -ENOENT; goto out_unlock; } @@ -1522,7 +1522,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data, drm_modeset_lock_all(dev); plane = drm_plane_find(dev, get->plane_id); - if (!plane) { + if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) { ret = -ENOENT; goto out_unlock; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3f2a8d3febca..6cb4dc1ec1d0 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1565,6 +1565,7 @@ struct radeon_dpm { int new_active_crtc_count; u32 current_active_crtcs; int current_active_crtc_count; + bool single_display; struct radeon_dpm_dynamic_state dyn_state; struct radeon_dpm_fan fan; u32 tdp_limit; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 63ccb8fa799c..d27e4ccb848c 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) static bool radeon_read_bios(struct radeon_device *rdev) { - uint8_t __iomem *bios; + uint8_t __iomem *bios, val1, val2; size_t size; rdev->bios = NULL; @@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev) return false; } - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + val1 = readb(&bios[0]); + val2 = readb(&bios[1]); + + if 
(size == 0 || val1 != 0x55 || val2 != 0xaa) { pci_unmap_rom(rdev->pdev, bios); return false; } - rdev->bios = kmemdup(bios, size, GFP_KERNEL); + rdev->bios = kzalloc(size, GFP_KERNEL); if (rdev->bios == NULL) { pci_unmap_rom(rdev->pdev, bios); return false; } + memcpy_fromio(rdev->bios, bios, size); pci_unmap_rom(rdev->pdev, bios); return true; } diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index a69bd441dd2d..572b4dbec186 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, it = interval_tree_iter_first(&rmn->objects, start, end); while (it) { struct radeon_bo *bo; - struct fence *fence; int r; bo = container_of(it, struct radeon_bo, mn_it); @@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, continue; } - fence = reservation_object_get_excl(bo->tbo.resv); - if (fence) { - r = radeon_fence_wait((struct radeon_fence *)fence, false); - if (r) - DRM_ERROR("(%d) failed to wait for user bo\n", r); - } + r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, + false, MAX_SCHEDULE_TIMEOUT); + if (r) + DRM_ERROR("(%d) failed to wait for user bo\n", r); radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f7da8fe96a66..1d94b542cd82 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -704,12 +704,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work) radeon_pm_compute_clocks(rdev); } -static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, - enum radeon_pm_state_type dpm_state) +static bool radeon_dpm_single_display(struct radeon_device *rdev) { - int i; - struct radeon_ps *ps; - u32 ui_class; bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ? true : false; @@ -719,6 +715,17 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, single_display = false; } + return single_display; +} + +static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, + enum radeon_pm_state_type dpm_state) +{ + int i; + struct radeon_ps *ps; + u32 ui_class; + bool single_display = radeon_dpm_single_display(rdev); + /* certain older asics have a separare 3D performance state, * so try that first if the user selected performance */ @@ -844,6 +851,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) struct radeon_ps *ps; enum radeon_pm_state_type dpm_state; int ret; + bool single_display = radeon_dpm_single_display(rdev); /* if dpm init failed */ if (!rdev->pm.dpm_enabled) @@ -868,6 +876,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) /* vce just modifies an existing state so force a change */ if (ps->vce_active != rdev->pm.dpm.vce_active) goto force; + /* user has made a display change (such as timing) */ + if (rdev->pm.dpm.single_display != single_display) + goto force; if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { /* for pre-BTC and APUs if the num crtcs changed but state is the same, * all we need to do is update the display configuration. 
@@ -930,6 +941,7 @@ force: rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; + rdev->pm.dpm.single_display = single_display; /* wait for the rings to drain */ for (i = 0; i < RADEON_NUM_RINGS; i++) { diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index d02aa1d0f588..b292aca0f342 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -598,6 +598,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) enum dma_data_direction direction = write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + /* double check that we don't free the table twice */ + if (!ttm->sg->sgl) + return; + /* free the sg table and pages again */ dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c index 066d0c04072c..9f5b41bde7ba 100644 --- a/drivers/iio/accel/bmc150-accel.c +++ b/drivers/iio/accel/bmc150-accel.c @@ -168,14 +168,14 @@ static const struct { int val; int val2; u8 bw_bits; -} bmc150_accel_samp_freq_table[] = { {7, 810000, 0x08}, - {15, 630000, 0x09}, - {31, 250000, 0x0A}, - {62, 500000, 0x0B}, - {125, 0, 0x0C}, - {250, 0, 0x0D}, - {500, 0, 0x0E}, - {1000, 0, 0x0F} }; +} bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08}, + {31, 260000, 0x09}, + {62, 500000, 0x0A}, + {125, 0, 0x0B}, + {250, 0, 0x0C}, + {500, 0, 0x0D}, + {1000, 0, 0x0E}, + {2000, 0, 0x0F} }; static const struct { int bw_bits; @@ -840,7 +840,7 @@ static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev, } static IIO_CONST_ATTR_SAMP_FREQ_AVAIL( - "7.810000 15.630000 31.250000 62.500000 125 250 500 1000"); + "15.620000 31.260000 62.50000 125 250 500 1000 2000"); static struct attribute *bmc150_accel_attributes[] = { &iio_const_attr_sampling_frequency_available.dev_attr.attr, diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index 8ec353c01d98..e63b8e76d4c3 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c @@ -141,9 +141,13 @@ struct vf610_adc { struct regulator *vref; struct vf610_adc_feature adc_feature; + u32 sample_freq_avail[5]; + struct completion completion; }; +static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 }; + #define VF610_ADC_CHAN(_idx, _chan_type) { \ .type = (_chan_type), \ .indexed = 1, \ @@ -180,35 +184,47 @@ static const struct iio_chan_spec vf610_adc_iio_channels[] = { /* sentinel */ }; -/* - * ADC sample frequency, unit is ADCK cycles. - * ADC clk source is ipg clock, which is the same as bus clock. - * - * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder) - * SFCAdder: fixed to 6 ADCK cycles - * AverageNum: 1, 4, 8, 16, 32 samples for hardware average. - * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode - * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles - * - * By default, enable 12 bit resolution mode, clock source - * set to ipg clock, So get below frequency group: - */ -static const u32 vf610_sample_freq_avail[5] = -{1941176, 559332, 286957, 145374, 73171}; +static inline void vf610_adc_calculate_rates(struct vf610_adc *info) +{ + unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk); + int i; + + /* + * Calculate ADC sample frequencies + * Sample time unit is ADCK cycles. ADCK clk source is ipg clock, + * which is the same as bus clock. 
+ * + * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder) + * SFCAdder: fixed to 6 ADCK cycles + * AverageNum: 1, 4, 8, 16, 32 samples for hardware average. + * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode + * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles + */ + adck_rate = ipg_rate / info->adc_feature.clk_div; + for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++) + info->sample_freq_avail[i] = + adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3)); +} static inline void vf610_adc_cfg_init(struct vf610_adc *info) { + struct vf610_adc_feature *adc_feature = &info->adc_feature; + /* set default Configuration for ADC controller */ - info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET; - info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET; + adc_feature->clk_sel = VF610_ADCIOC_BUSCLK_SET; + adc_feature->vol_ref = VF610_ADCIOC_VR_VREF_SET; + + adc_feature->calibration = true; + adc_feature->ovwren = true; + + adc_feature->res_mode = 12; + adc_feature->sample_rate = 1; + adc_feature->lpm = true; - info->adc_feature.calibration = true; - info->adc_feature.ovwren = true; + /* Use a save ADCK which is below 20MHz on all devices */ + adc_feature->clk_div = 8; - info->adc_feature.clk_div = 1; - info->adc_feature.res_mode = 12; - info->adc_feature.sample_rate = 1; - info->adc_feature.lpm = true; + vf610_adc_calculate_rates(info); } static void vf610_adc_cfg_post_set(struct vf610_adc *info) @@ -290,12 +306,10 @@ static void vf610_adc_cfg_set(struct vf610_adc *info) cfg_data = readl(info->regs + VF610_REG_ADC_CFG); - /* low power configuration */ cfg_data &= ~VF610_ADC_ADLPC_EN; if (adc_feature->lpm) cfg_data |= VF610_ADC_ADLPC_EN; - /* disable high speed */ cfg_data &= ~VF610_ADC_ADHSC_EN; writel(cfg_data, info->regs + VF610_REG_ADC_CFG); @@ -435,10 +449,27 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171"); +static ssize_t vf610_show_samp_freq_avail(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct vf610_adc *info = iio_priv(dev_to_iio_dev(dev)); + size_t len = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(info->sample_freq_avail); i++) + len += scnprintf(buf + len, PAGE_SIZE - len, + "%u ", info->sample_freq_avail[i]); + + /* replace trailing space by newline */ + buf[len - 1] = '\n'; + + return len; +} + +static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(vf610_show_samp_freq_avail); static struct attribute *vf610_attributes[] = { - &iio_const_attr_sampling_frequency_available.dev_attr.attr, + &iio_dev_attr_sampling_frequency_available.dev_attr.attr, NULL }; @@ -502,7 +533,7 @@ static int vf610_read_raw(struct iio_dev *indio_dev, return IIO_VAL_FRACTIONAL_LOG2; case IIO_CHAN_INFO_SAMP_FREQ: - *val = vf610_sample_freq_avail[info->adc_feature.sample_rate]; + *val = info->sample_freq_avail[info->adc_feature.sample_rate]; *val2 = 0; return IIO_VAL_INT; @@ -525,9 +556,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_SAMP_FREQ: for (i = 0; - i < ARRAY_SIZE(vf610_sample_freq_avail); + i < ARRAY_SIZE(info->sample_freq_avail); i++) - if (val == vf610_sample_freq_avail[i]) { + if (val == info->sample_freq_avail[i]) { info->adc_feature.sample_rate = i; vf610_adc_sample_set(info); return 0; diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c index e0017c22bb9c..f53e9a803a0e 100644 --- a/drivers/iio/imu/adis_trigger.c +++ b/drivers/iio/imu/adis_trigger.c @@ -60,7 +60,7 @@ int 
adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) iio_trigger_set_drvdata(adis->trig, adis); ret = iio_trigger_register(adis->trig); - indio_dev->trig = adis->trig; + indio_dev->trig = iio_trigger_get(adis->trig); if (ret) goto error_free_irq; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c index 0cd306a72a6e..ba27e277511f 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c @@ -24,6 +24,16 @@ #include #include "inv_mpu_iio.h" +static void inv_clear_kfifo(struct inv_mpu6050_state *st) +{ + unsigned long flags; + + /* take the spin lock sem to avoid interrupt kick in */ + spin_lock_irqsave(&st->time_stamp_lock, flags); + kfifo_reset(&st->timestamps); + spin_unlock_irqrestore(&st->time_stamp_lock, flags); +} + int inv_reset_fifo(struct iio_dev *indio_dev) { int result; @@ -50,6 +60,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev) INV_MPU6050_BIT_FIFO_RST); if (result) goto reset_fifo_fail; + + /* clear timestamps fifo */ + inv_clear_kfifo(st); + /* enable interrupt */ if (st->chip_config.accl_fifo_enable || st->chip_config.gyro_fifo_enable) { @@ -83,16 +97,6 @@ reset_fifo_fail: return result; } -static void inv_clear_kfifo(struct inv_mpu6050_state *st) -{ - unsigned long flags; - - /* take the spin lock sem to avoid interrupt kick in */ - spin_lock_irqsave(&st->time_stamp_lock, flags); - kfifo_reset(&st->timestamps); - spin_unlock_irqrestore(&st->time_stamp_lock, flags); -} - /** * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt. */ @@ -184,7 +188,6 @@ end_session: flush_fifo: /* Flush HW and SW FIFOs. */ inv_reset_fifo(indio_dev); - inv_clear_kfifo(st); mutex_unlock(&indio_dev->mlock); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index af3e76d652ba..f009d053384a 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -832,8 +832,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev, * @attr_list: List of IIO device attributes * * This function frees the memory allocated for each of the IIO device - * attributes in the list. Note: if you want to reuse the list after calling - * this function you have to reinitialize it using INIT_LIST_HEAD(). + * attributes in the list. 
*/ void iio_free_chan_devattr_list(struct list_head *attr_list) { @@ -841,6 +840,7 @@ void iio_free_chan_devattr_list(struct list_head *attr_list) list_for_each_entry_safe(p, n, attr_list, l) { kfree(p->dev_attr.attr.name); + list_del(&p->l); kfree(p); } } @@ -921,6 +921,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev) iio_free_chan_devattr_list(&indio_dev->channel_attr_list); kfree(indio_dev->chan_attr_group.attrs); + indio_dev->chan_attr_group.attrs = NULL; } static void iio_dev_release(struct device *device) diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c index 0c1e37e3120a..35c02aeec75e 100644 --- a/drivers/iio/industrialio-event.c +++ b/drivers/iio/industrialio-event.c @@ -493,6 +493,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev) error_free_setup_event_lines: iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list); kfree(indio_dev->event_interface); + indio_dev->event_interface = NULL; return ret; } diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index aec7a6aa2951..8c014b5dab4c 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, if (dmasync) dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); + /* + * If the combination of the addr and size requested for this memory + * region causes an integer overflow, return error. + */ + if ((PAGE_ALIGN(addr + size) <= size) || + (PAGE_ALIGN(addr + size) <= addr)) + return ERR_PTR(-EINVAL); + if (!can_do_mlock()) return ERR_PTR(-EPERM); diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 82a7dd87089b..729382c06c5e 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -64,6 +64,14 @@ enum { #define GUID_TBL_BLK_NUM_ENTRIES 8 #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES) +/* Counters should be saturate once they reach their maximum value */ +#define ASSIGN_32BIT_COUNTER(counter, value) do {\ + if ((value) > U32_MAX) \ + counter = cpu_to_be32(U32_MAX); \ + else \ + counter = cpu_to_be32(value); \ +} while (0) + struct mlx4_mad_rcv_buf { struct ib_grh grh; u8 payload[256]; @@ -806,10 +814,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, static void edit_counter(struct mlx4_counter *cnt, struct ib_pma_portcounters *pma_cnt) { - pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2)); - pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2)); - pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames)); - pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames)); + ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data, + (be64_to_cpu(cnt->tx_bytes) >> 2)); + ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data, + (be64_to_cpu(cnt->rx_bytes) >> 2)); + ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets, + be64_to_cpu(cnt->tx_frames)); + ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets, + be64_to_cpu(cnt->rx_frames)); } static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 40dfbc0444c0..8dd078b83e3b 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1749,8 +1749,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width) static void domain_exit(struct dmar_domain *domain) { struct dmar_drhd_unit *drhd; - struct 
intel_iommu *iommu; struct page *freelist = NULL; + int i; /* Domain 0 is reserved, so dont process it */ if (!domain) @@ -1770,8 +1770,8 @@ static void domain_exit(struct dmar_domain *domain) /* clear attached or cached domains */ rcu_read_lock(); - for_each_active_iommu(iommu, drhd) - iommu_detach_domain(domain, iommu); + for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) + iommu_detach_domain(domain, g_iommus[i]); rcu_read_unlock(); dma_free_pagelist(freelist); diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c index e4901a503c73..63c0ee5d0bf5 100644 --- a/drivers/media/pci/cx23885/cx23885-417.c +++ b/drivers/media/pci/cx23885/cx23885-417.c @@ -1339,14 +1339,13 @@ static int vidioc_querycap(struct file *file, void *priv, strlcpy(cap->driver, dev->name, sizeof(cap->driver)); strlcpy(cap->card, cx23885_boards[tsport->dev->board].name, sizeof(cap->card)); - sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci)); - cap->capabilities = - V4L2_CAP_VIDEO_CAPTURE | - V4L2_CAP_READWRITE | - V4L2_CAP_STREAMING | - 0; + sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci)); + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | + V4L2_CAP_STREAMING; if (dev->tuner_type != TUNER_ABSENT) - cap->capabilities |= V4L2_CAP_TUNER; + cap->device_caps |= V4L2_CAP_TUNER; + cap->capabilities = cap->device_caps | V4L2_CAP_VBI_CAPTURE | + V4L2_CAP_AUDIO | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h index 15f7663dd9f5..24262bbb1a35 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h @@ -29,7 +29,7 @@ /* Offset base used to differentiate between CAPTURE and OUTPUT * while mmaping */ -#define DST_QUEUE_OFF_BASE (TASK_SIZE / 2) +#define DST_QUEUE_OFF_BASE (1 << 30) #define MFC_BANK1_ALLOC_CTX 0 #define MFC_BANK2_ALLOC_CTX 1 diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c index aaa1f6f25a29..4d507fef0c84 100644 --- a/drivers/media/platform/sh_veu.c +++ b/drivers/media/platform/sh_veu.c @@ -1179,6 +1179,7 @@ static int sh_veu_probe(struct platform_device *pdev) } *vdev = sh_veu_videodev; + vdev->v4l2_dev = &veu->v4l2_dev; spin_lock_init(&veu->lock); mutex_init(&veu->fop_lock); vdev->lock = &veu->fop_lock; diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c index b3db51c82bde..f9dfebb0bac2 100644 --- a/drivers/media/platform/soc_camera/soc_camera.c +++ b/drivers/media/platform/soc_camera/soc_camera.c @@ -1681,7 +1681,7 @@ eclkreg: eaddpdev: platform_device_put(sasc->pdev); eallocpdev: - devm_kfree(ici->v4l2_dev.dev, sasc); + devm_kfree(ici->v4l2_dev.dev, info); dev_err(ici->v4l2_dev.dev, "group probe failed: %d\n", ret); return ret; diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index bc08a829bc13..cc16e76a2493 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -3230,18 +3230,13 @@ int vb2_thread_stop(struct vb2_queue *q) if (threadio == NULL) return 0; - call_void_qop(q, wait_finish, q); threadio->stop = true; - vb2_internal_streamoff(q, q->type); - call_void_qop(q, wait_prepare, q); + /* Wake up all pending sleeps in the thread */ + vb2_queue_error(q); err = kthread_stop(threadio->thread); - q->fileio = NULL; - fileio->req.count = 0; - vb2_reqbufs(q, &fileio->req); - kfree(fileio); + __vb2_cleanup_fileio(q); 
threadio->thread = NULL; kfree(threadio); - q->fileio = NULL; q->threadio = NULL; return err; } diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index b481d20c8372..69e0483adfee 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c @@ -632,8 +632,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, } /* extract page list from userspace mapping */ - ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, - dma_dir == DMA_FROM_DEVICE); + ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir); if (ret) { unsigned long pfn; if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) { diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index b1d583ba9674..334453fa7e9a 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -593,13 +593,12 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ? CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; new_state = max(tx_state, rx_state); - } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) { + } else { __flexcan_get_berr_counter(dev, &bec); - new_state = CAN_STATE_ERROR_PASSIVE; + new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ? + CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF; rx_state = bec.rxerr >= bec.txerr ? new_state : 0; tx_state = bec.rxerr <= bec.txerr ? new_state : 0; - } else { - new_state = CAN_STATE_BUS_OFF; } /* state hasn't changed */ @@ -1159,12 +1158,19 @@ static int flexcan_probe(struct platform_device *pdev) const struct flexcan_devtype_data *devtype_data; struct net_device *dev; struct flexcan_priv *priv; + struct regulator *reg_xceiver; struct resource *mem; struct clk *clk_ipg = NULL, *clk_per = NULL; void __iomem *base; int err, irq; u32 clock_freq = 0; + reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); + if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER) + return -EPROBE_DEFER; + else if (IS_ERR(reg_xceiver)) + reg_xceiver = NULL; + if (pdev->dev.of_node) of_property_read_u32(pdev->dev.of_node, "clock-frequency", &clock_freq); @@ -1226,9 +1232,7 @@ static int flexcan_probe(struct platform_device *pdev) priv->pdata = dev_get_platdata(&pdev->dev); priv->devtype_data = devtype_data; - priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); - if (IS_ERR(priv->reg_xceiver)) - priv->reg_xceiver = NULL; + priv->reg_xceiver = reg_xceiver; netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index cb366adc820b..f50a6bc5d06e 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -219,12 +219,15 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif) struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_vif *avp = (void *)vif->drv_priv; struct ath_buf *bf = avp->av_bcbuf; + struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon; ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n", avp->av_bslot); tasklet_disable(&sc->bcon_tasklet); + cur_conf->enable_beacon &= ~BIT(avp->av_bslot); + if (bf && bf->bf_mpdu) { struct sk_buff *skb = bf->bf_mpdu; dma_unmap_single(sc->dev, bf->bf_buf_addr, @@ -521,8 +524,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc, } if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { - if ((vif->type != NL80211_IFTYPE_AP) || - (sc->nbcnvifs > 1)) { 
+ if (vif->type != NL80211_IFTYPE_AP) { ath_dbg(common, CONFIG, "An AP interface is already present !\n"); return false; @@ -616,12 +618,14 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif, * enabling/disabling SWBA. */ if (changed & BSS_CHANGED_BEACON_ENABLED) { - if (!bss_conf->enable_beacon && - (sc->nbcnvifs <= 1)) { - cur_conf->enable_beacon = false; - } else if (bss_conf->enable_beacon) { - cur_conf->enable_beacon = true; - ath9k_cache_beacon_config(sc, ctx, bss_conf); + bool enabled = cur_conf->enable_beacon; + + if (!bss_conf->enable_beacon) { + cur_conf->enable_beacon &= ~BIT(avp->av_bslot); + } else { + cur_conf->enable_beacon |= BIT(avp->av_bslot); + if (!enabled) + ath9k_cache_beacon_config(sc, ctx, bss_conf); } } diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h index 2b79a568e803..d23737342f4f 100644 --- a/drivers/net/wireless/ath/ath9k/common.h +++ b/drivers/net/wireless/ath/ath9k/common.h @@ -54,7 +54,7 @@ struct ath_beacon_config { u16 dtim_period; u16 bmiss_timeout; u8 dtim_count; - bool enable_beacon; + u8 enable_beacon; bool ibss_creator; u32 nexttbtt; u32 intval; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c index defb7a44e0bc..7748a1ccf14f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c @@ -126,7 +126,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan"); if (drvr->bus_if->wowl_supported) brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); - brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0); + if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID) + brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0); /* set chip related quirks */ switch (drvr->bus_if->chip) { diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h index a6f22c32a279..3811878ab9cd 100644 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/iwlwifi/dvm/dev.h @@ -708,7 +708,6 @@ struct iwl_priv { unsigned long reload_jiffies; int reload_count; bool ucode_loaded; - bool init_ucode_run; /* Don't run init uCode again */ u8 plcp_delta_threshold; diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index d5cee1530597..80b8094deed1 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c @@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv) if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len) return 0; - if (priv->init_ucode_run) - return 0; - iwl_init_notification_wait(&priv->notif_wait, &calib_wait, calib_complete, ARRAY_SIZE(calib_complete), iwlagn_wait_calib, priv); @@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv) */ ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, UCODE_CALIB_TIMEOUT); - if (!ret) - priv->init_ucode_run = true; goto out; diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index e25faacf58b7..a5186bb7c63e 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -1118,12 +1118,22 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) /*This is for new trx flow*/ struct rtl_tx_buffer_desc *pbuffer_desc = NULL; u8 temp_one = 1; + u8 *entry; memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); ring = &rtlpci->tx_ring[BEACON_QUEUE]; pskb = 
__skb_dequeue(&ring->queue); - if (pskb) + if (rtlpriv->use_new_trx_flow) + entry = (u8 *)(&ring->buffer_desc[ring->idx]); + else + entry = (u8 *)(&ring->desc[ring->idx]); + if (pskb) { + pci_unmap_single(rtlpci->pdev, + rtlpriv->cfg->ops->get_desc( + (u8 *)entry, true, HW_DESC_TXBUFF_ADDR), + pskb->len, PCI_DMA_TODEVICE); kfree_skb(pskb); + } /*NB: the beacon data buffer must be 32-bit aligned. */ pskb = ieee80211_beacon_get(hw, mac->vif); diff --git a/drivers/of/address.c b/drivers/of/address.c index ad2906919d45..78a7dcbec7d8 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -450,12 +450,17 @@ static struct of_bus *of_match_bus(struct device_node *np) return NULL; } -static int of_empty_ranges_quirk(void) +static int of_empty_ranges_quirk(struct device_node *np) { if (IS_ENABLED(CONFIG_PPC)) { - /* To save cycles, we cache the result */ + /* To save cycles, we cache the result for global "Mac" setting */ static int quirk_state = -1; + /* PA-SEMI sdc DT bug */ + if (of_device_is_compatible(np, "1682m-sdc")) + return true; + + /* Make quirk cached */ if (quirk_state < 0) quirk_state = of_machine_is_compatible("Power Macintosh") || @@ -490,7 +495,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus, * This code is only enabled on powerpc. --gcl */ ranges = of_get_property(parent, rprop, &rlen); - if (ranges == NULL && !of_empty_ranges_quirk()) { + if (ranges == NULL && !of_empty_ranges_quirk(parent)) { pr_debug("OF: no ranges; cannot translate\n"); return 1; } diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 17ca98657a28..aa2da59a154a 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -342,7 +342,7 @@ static const struct irq_domain_ops msi_domain_ops = { .map = dw_pcie_msi_map, }; -int __init dw_pcie_host_init(struct pcie_port *pp) +int dw_pcie_host_init(struct pcie_port *pp) { struct device_node *np = pp->dev->of_node; struct platform_device *pdev = to_platform_device(pp->dev); diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c index 866465fd3dbf..020d78890719 100644 --- a/drivers/pci/host/pcie-spear13xx.c +++ b/drivers/pci/host/pcie-spear13xx.c @@ -269,7 +269,7 @@ static struct pcie_host_ops spear13xx_pcie_host_ops = { .host_init = spear13xx_pcie_host_init, }; -static int __init spear13xx_add_pcie_port(struct pcie_port *pp, +static int spear13xx_add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -299,7 +299,7 @@ static int __init spear13xx_add_pcie_port(struct pcie_port *pp, return 0; } -static int __init spear13xx_pcie_probe(struct platform_device *pdev) +static int spear13xx_pcie_probe(struct platform_device *pdev) { struct spear13xx_pcie *spear13xx_pcie; struct pcie_port *pp; @@ -370,7 +370,7 @@ static const struct of_device_id spear13xx_pcie_of_match[] = { }; MODULE_DEVICE_TABLE(of, spear13xx_pcie_of_match); -static struct platform_driver spear13xx_pcie_driver __initdata = { +static struct platform_driver spear13xx_pcie_driver = { .probe = spear13xx_pcie_probe, .driver = { .name = "spear-pcie", diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index 7d48ecae6695..788db48dbbad 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c @@ -286,11 +286,12 @@ int cpci_configure_slot(struct slot *slot) } parent = slot->dev->bus; - list_for_each_entry(dev, &parent->devices, bus_list) + 
list_for_each_entry(dev, &parent->devices, bus_list) { if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn)) continue; if (pci_is_bridge(dev)) pci_hp_add_bridge(dev); + } pci_assign_unassigned_bridge_resources(parent->self); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 3542150fc8a3..26b37b663a1d 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -248,6 +248,9 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) acpi_handle handle, phandle; struct pci_bus *pbus; + if (acpi_pci_disabled) + return -ENODEV; + handle = NULL; for (pbus = dev->bus; pbus; pbus = pbus->parent) { handle = acpi_pci_get_bridge_handle(pbus); diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c index c6849d9e86ce..167fe411ce2e 100644 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c @@ -132,16 +132,8 @@ static const char *aer_agent_string[] = { static void __print_tlp_header(struct pci_dev *dev, struct aer_header_log_regs *t) { - unsigned char *tlp = (unsigned char *)&t; - - dev_err(&dev->dev, " TLP Header:" - " %02x%02x%02x%02x %02x%02x%02x%02x" - " %02x%02x%02x%02x %02x%02x%02x%02x\n", - *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, - *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), - *(tlp + 11), *(tlp + 10), *(tlp + 9), - *(tlp + 8), *(tlp + 15), *(tlp + 14), - *(tlp + 13), *(tlp + 12)); + dev_err(&dev->dev, " TLP Header: %08x %08x %08x %08x\n", + t->dw0, t->dw1, t->dw2, t->dw3); } static void __aer_print_error(struct pci_dev *dev, diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 9cc047bc763b..7a56f443ffc0 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -5735,9 +5735,9 @@ free_port: hba_free: if (phba->msix_enabled) pci_disable_msix(phba->pcidev); - iscsi_host_remove(phba->shost); pci_dev_put(phba->pcidev); iscsi_host_free(phba->shost); + pci_set_drvdata(pcidev, NULL); disable_pci: pci_disable_device(pcidev); return ret; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 17bb541f7cc2..71297017e499 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1311,9 +1311,11 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req) "rejecting I/O to dead device\n"); ret = BLKPREP_KILL; break; - case SDEV_QUIESCE: case SDEV_BLOCK: case SDEV_CREATED_BLOCK: + ret = BLKPREP_DEFER; + break; + case SDEV_QUIESCE: /* * If the devices is blocked we defer normal commands. */ diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 8d27db47560c..22d67c034cdf 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1185,7 +1185,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, * traditional iSCSI block I/O. */ if (iscsit_allocate_iovecs(cmd) < 0) { - return iscsit_add_reject_cmd(cmd, + return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); } immed_data = cmd->immediate_data; diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 4ddfa60c9222..6f8cf3a52861 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -247,8 +247,6 @@ static void n_tty_write_wakeup(struct tty_struct *tty) static void n_tty_check_throttle(struct tty_struct *tty) { - if (tty->driver->type == TTY_DRIVER_TYPE_PTY) - return; /* * Check the remaining room for the input canonicalization * mode. 
We don't want to throttle the driver if we're in @@ -1512,23 +1510,6 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag) n_tty_receive_char_flagged(tty, c, flag); } -/** - * n_tty_receive_buf - data receive - * @tty: terminal device - * @cp: buffer - * @fp: flag buffer - * @count: characters - * - * Called by the terminal driver when a block of characters has - * been received. This function must be called from soft contexts - * not from interrupt context. The driver is responsible for making - * calls one at a time and in order (or using flush_to_ldisc) - * - * n_tty_receive_buf()/producer path: - * claims non-exclusive termios_rwsem - * publishes read_head and canon_head - */ - static void n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) @@ -1684,24 +1665,85 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, } } +/** + * n_tty_receive_buf_common - process input + * @tty: device to receive input + * @cp: input chars + * @fp: flags for each char (if NULL, all chars are TTY_NORMAL) + * @count: number of input chars in @cp + * + * Called by the terminal driver when a block of characters has + * been received. This function must be called from soft contexts + * not from interrupt context. The driver is responsible for making + * calls one at a time and in order (or using flush_to_ldisc) + * + * Returns the # of input chars from @cp which were processed. + * + * In canonical mode, the maximum line length is 4096 chars (including + * the line termination char); lines longer than 4096 chars are + * truncated. After 4095 chars, input data is still processed but + * not stored. Overflow processing ensures the tty can always + * receive more input until at least one line can be read. + * + * In non-canonical mode, the read buffer will only accept 4095 chars; + * this provides the necessary space for a newline char if the input + * mode is switched to canonical. + * + * Note it is possible for the read buffer to _contain_ 4096 chars + * in non-canonical mode: the read buffer could already contain the + * maximum canon line of 4096 chars when the mode is switched to + * non-canonical. + * + * n_tty_receive_buf()/producer path: + * claims non-exclusive termios_rwsem + * publishes commit_head or canon_head + */ static int n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp, char *fp, int count, int flow) { struct n_tty_data *ldata = tty->disc_data; - int room, n, rcvd = 0; + int room, n, rcvd = 0, overflow; down_read(&tty->termios_rwsem); while (1) { - room = receive_room(tty); + /* + * When PARMRK is set, each input char may take up to 3 chars + * in the read buf; reduce the buffer space avail by 3x + * + * If we are doing input canonicalization, and there are no + * pending newlines, let characters through without limit, so + * that erase characters will be handled. Other excess + * characters will be beeped. 
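As a side note on the accounting described in the comment above: with PARMRK set a single received character can occupy up to three bytes in the read buffer, so the free space is divided by three before one slot is held back for the line terminator. A minimal standalone sketch of that calculation follows; the fixed 4096-byte buffer and the free-running head/tail counters are assumptions of the example, not the driver's interface.

#include <stdio.h>

#define BUF_SIZE 4096

/* head and tail are free-running counters; (head - tail) bytes are in use */
static long receive_room(unsigned long head, unsigned long tail, int parmrk)
{
    long room = BUF_SIZE - (long)(head - tail);

    /* worst case with PARMRK: one input character stored as three bytes */
    if (parmrk)
        room = (room + 2) / 3;

    /* hold one slot back so a line terminator can always be stored */
    return room - 1;
}

int main(void)
{
    printf("%ld\n", receive_room(100, 0, 0));   /* 3995 */
    printf("%ld\n", receive_room(100, 0, 1));   /* 1331 */
    return 0;
}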
+ * + * paired with store in *_copy_from_read_buf() -- guarantees + * the consumer has loaded the data in read_buf up to the new + * read_tail (so this producer will not overwrite unread data) + */ + size_t tail = ldata->read_tail; + + room = N_TTY_BUF_SIZE - (ldata->read_head - tail); + if (I_PARMRK(tty)) + room = (room + 2) / 3; + room--; + if (room <= 0) { + overflow = ldata->icanon && ldata->canon_head == tail; + if (overflow && room < 0) + ldata->read_head--; + room = overflow; + ldata->no_room = flow && !room; + } else + overflow = 0; + n = min(count, room); - if (!n) { - if (flow && !room) - ldata->no_room = 1; + if (!n) break; - } - __receive_buf(tty, cp, fp, n); + + /* ignore parity errors if handling overflow */ + if (!overflow || !fp || *fp != TTY_PARITY) + __receive_buf(tty, cp, fp, n); + cp += n; if (fp) fp += n; @@ -1710,7 +1752,17 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp, } tty->receive_room = room; - n_tty_check_throttle(tty); + + /* Unthrottle if handling overflow on pty */ + if (tty->driver->type == TTY_DRIVER_TYPE_PTY) { + if (overflow) { + tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE); + tty_unthrottle_safe(tty); + __tty_set_flow_change(tty, 0); + } + } else + n_tty_check_throttle(tty); + up_read(&tty->termios_rwsem); return rcvd; diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index e95c4971327b..0f63b93d8fc6 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -910,6 +910,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport) writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE, sport->port.membase + UARTPFIFO); + /* explicitly clear RDRF */ + readb(sport->port.membase + UARTSR1); + /* flush Tx and Rx FIFO */ writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH, sport->port.membase + UARTCFIFO); @@ -1095,6 +1098,8 @@ static int lpuart_startup(struct uart_port *port) sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) & UARTPFIFO_FIFOSIZE_MASK) + 1); + sport->port.fifosize = sport->txfifo_size; + sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) & UARTPFIFO_FIFOSIZE_MASK) + 1); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index a7865c4b0498..0827d7c96527 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -387,6 +387,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue, status = PORT_PLC; port_change_bit = "link state"; break; + case USB_PORT_FEAT_C_PORT_CONFIG_ERROR: + status = PORT_CEC; + port_change_bit = "config error"; + break; default: /* Should never happen */ return; @@ -588,6 +592,8 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, status |= USB_PORT_STAT_C_LINK_STATE << 16; if ((raw_port_status & PORT_WRC)) status |= USB_PORT_STAT_C_BH_RESET << 16; + if ((raw_port_status & PORT_CEC)) + status |= USB_PORT_STAT_C_CONFIG_ERROR << 16; } if (hcd->speed != HCD_USB3) { @@ -1005,6 +1011,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, case USB_PORT_FEAT_C_OVER_CURRENT: case USB_PORT_FEAT_C_ENABLE: case USB_PORT_FEAT_C_PORT_LINK_STATE: + case USB_PORT_FEAT_C_PORT_CONFIG_ERROR: xhci_clear_port_change_bit(xhci, wValue, wIndex, port_array[wIndex], temp); break; @@ -1069,7 +1076,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) */ status = bus_state->resuming_ports; - mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC; + mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC; spin_lock_irqsave(&xhci->lock, flags); /* 
For each port, did anything change? If so, set that bit in buf. */ diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index fd53c9ebd662..2af32e26fafc 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -115,6 +115,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_INTEL) { xhci->quirks |= XHCI_LPM_SUPPORT; xhci->quirks |= XHCI_INTEL_HOST; + xhci->quirks |= XHCI_AVOID_BEI; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { @@ -130,7 +131,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) * PPT chipsets. */ xhci->quirks |= XHCI_SPURIOUS_REBOOT; - xhci->quirks |= XHCI_AVOID_BEI; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 3086dec0ef53..8eb68a31cab6 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -604,6 +604,7 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, /* * ELV devices: */ @@ -1883,8 +1884,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial) { struct usb_device *udev = serial->dev; - if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) || - (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2"))) + if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) + return ftdi_jtag_probe(serial); + + if (udev->product && + (!strcmp(udev->product, "BeagleBone/XDS100V2") || + !strcmp(udev->product, "SNAP Connect E10"))) return ftdi_jtag_probe(serial); return 0; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 56b1b55c4751..4e4f46f3c89c 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -561,6 +561,12 @@ */ #define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ +/* + * Synapse Wireless product ids (FTDI_VID) + * http://www.synapse-wireless.com + */ +#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */ + /********************************/ /** third-party VID/PID combos **/ diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index dd97d8b572c3..4f7e072e4e00 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c @@ -61,6 +61,7 @@ struct keyspan_pda_private { /* For Xircom PGSDB9 and older Entrega version of the same device */ #define XIRCOM_VENDOR_ID 0x085a #define XIRCOM_FAKE_ID 0x8027 +#define XIRCOM_FAKE_ID_2 0x8025 /* "PGMFHUB" serial */ #define ENTREGA_VENDOR_ID 0x1645 #define ENTREGA_FAKE_ID 0x8093 @@ -70,6 +71,7 @@ static const struct usb_device_id id_table_combined[] = { #endif #ifdef XIRCOM { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, + { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) }, { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) }, #endif { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) }, @@ -93,6 +95,7 @@ static const struct usb_device_id id_table_fake[] = { #ifdef XIRCOM static const struct usb_device_id id_table_fake_xircom[] = { { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, + { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) }, { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) }, { } }; diff 
--git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index b812462083fc..94d96809e686 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -55,6 +55,23 @@ config XEN_BALLOON_MEMORY_HOTPLUG In that case step 3 should be omitted. +config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT + int "Hotplugged memory limit (in GiB) for a PV guest" + default 512 if X86_64 + default 4 if X86_32 + range 0 64 if X86_32 + depends on XEN_HAVE_PVMMU + depends on XEN_BALLOON_MEMORY_HOTPLUG + help + Maxmium amount of memory (in GiB) that a PV guest can be + expanded to when using memory hotplug. + + A PV guest can have more memory than this limit if is + started with a larger maximum. + + This value is used to allocate enough space in internal + tables needed for physical memory administration. + config XEN_SCRUB_PAGES bool "Scrub pages before returning them to system" depends on XEN_BALLOON diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 3860d02729dc..a325814341b7 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -230,6 +230,29 @@ static enum bp_state reserve_additional_memory(long credit) balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION); nid = memory_add_physaddr_to_nid(hotplug_start_paddr); +#ifdef CONFIG_XEN_HAVE_PVMMU + /* + * add_memory() will build page tables for the new memory so + * the p2m must contain invalid entries so the correct + * non-present PTEs will be written. + * + * If a failure occurs, the original (identity) p2m entries + * are not restored since this region is now known not to + * conflict with any devices. + */ + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + unsigned long pfn, i; + + pfn = PFN_DOWN(hotplug_start_paddr); + for (i = 0; i < balloon_hotplug; i++) { + if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) { + pr_warn("set_phys_to_machine() failed, no memory added\n"); + return BP_ECANCELED; + } + } + } +#endif + rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT); if (rc) { diff --git a/fs/aio.c b/fs/aio.c index c428871f1093..ebd0e9bf5323 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -740,6 +740,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) err_cleanup: aio_nr_sub(ctx->max_reqs); err_ctx: + atomic_set(&ctx->dead, 1); + if (ctx->mmap_size) + vm_munmap(ctx->mmap_base, ctx->mmap_size); aio_free_ring(ctx); err: mutex_unlock(&ctx->ring_lock); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 069ab24badaa..ef64903d6cb3 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1266,21 +1266,13 @@ out: } static int insert_orphan_item(struct btrfs_trans_handle *trans, - struct btrfs_root *root, u64 offset) + struct btrfs_root *root, u64 ino) { int ret; - struct btrfs_path *path; - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - ret = btrfs_find_item(root, path, BTRFS_ORPHAN_OBJECTID, - offset, BTRFS_ORPHAN_ITEM_KEY, NULL); - if (ret > 0) - ret = btrfs_insert_orphan_item(trans, root, offset); - - btrfs_free_path(path); + ret = btrfs_insert_orphan_item(trans, root, ino); + if (ret == -EEXIST) + ret = 0; return ret; } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 74f12877493a..3e30d920f85a 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1829,6 +1829,7 @@ refind_writable: cifsFileInfo_put(inv_file); spin_lock(&cifs_file_list_lock); ++refind; + inv_file = NULL; goto refind_writable; } } diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 96b5d40a2ece..eab05e1aa587 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -684,7 +684,8 @@ smb2_clone_range(const 
unsigned int xid, /* No need to change MaxChunks since already set to 1 */ chunk_sizes_updated = true; - } + } else + goto cchunk_out; } cchunk_out: diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 36b369697a13..5e7af1c69577 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -1393,10 +1393,7 @@ end_range: * to free. Everything was covered by the start * of the range. */ - return 0; - } else { - /* Shared branch grows from an indirect block */ - partial2--; + goto do_indirects; } } else { /* @@ -1427,56 +1424,96 @@ end_range: /* Punch happened within the same level (n == n2) */ partial = ext4_find_shared(inode, n, offsets, chain, &nr); partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2); - /* - * ext4_find_shared returns Indirect structure which - * points to the last element which should not be - * removed by truncate. But this is end of the range - * in punch_hole so we need to point to the next element - */ - partial2->p++; - while ((partial > chain) || (partial2 > chain2)) { - /* We're at the same block, so we're almost finished */ - if ((partial->bh && partial2->bh) && - (partial->bh->b_blocknr == partial2->bh->b_blocknr)) { - if ((partial > chain) && (partial2 > chain2)) { + + /* Free top, but only if partial2 isn't its subtree. */ + if (nr) { + int level = min(partial - chain, partial2 - chain2); + int i; + int subtree = 1; + + for (i = 0; i <= level; i++) { + if (offsets[i] != offsets2[i]) { + subtree = 0; + break; + } + } + + if (!subtree) { + if (partial == chain) { + /* Shared branch grows from the inode */ + ext4_free_branches(handle, inode, NULL, + &nr, &nr+1, + (chain+n-1) - partial); + *partial->p = 0; + } else { + /* Shared branch grows from an indirect block */ + BUFFER_TRACE(partial->bh, "get_write_access"); ext4_free_branches(handle, inode, partial->bh, - partial->p + 1, - partial2->p, + partial->p, + partial->p+1, (chain+n-1) - partial); - BUFFER_TRACE(partial->bh, "call brelse"); - brelse(partial->bh); - BUFFER_TRACE(partial2->bh, "call brelse"); - brelse(partial2->bh); } - return 0; } + } + + if (!nr2) { /* - * Clear the ends of indirect blocks on the shared branch - * at the start of the range + * ext4_find_shared returns Indirect structure which + * points to the last element which should not be + * removed by truncate. But this is end of the range + * in punch_hole so we need to point to the next element */ - if (partial > chain) { + partial2->p++; + } + + while (partial > chain || partial2 > chain2) { + int depth = (chain+n-1) - partial; + int depth2 = (chain2+n2-1) - partial2; + + if (partial > chain && partial2 > chain2 && + partial->bh->b_blocknr == partial2->bh->b_blocknr) { + /* + * We've converged on the same block. Clear the range, + * then we're done. + */ ext4_free_branches(handle, inode, partial->bh, - partial->p + 1, - (__le32 *)partial->bh->b_data+addr_per_block, - (chain+n-1) - partial); + partial->p + 1, + partial2->p, + (chain+n-1) - partial); BUFFER_TRACE(partial->bh, "call brelse"); brelse(partial->bh); - partial--; + BUFFER_TRACE(partial2->bh, "call brelse"); + brelse(partial2->bh); + return 0; } + /* - * Clear the ends of indirect blocks on the shared branch - * at the end of the range + * The start and end partial branches may not be at the same + * level even though the punch happened within one level. So, we + * give them a chance to arrive at the same level, then walk + * them in step with each other until we converge on the same + * block. 
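The comment above describes giving the two partial-branch cursors a chance to reach the same level and then walking them in step until they converge on a common block. A small self-contained sketch of that convergence idea, using plain root-to-leaf pointer arrays rather than the ext4 Indirect chains (all names here are illustrative):

#include <stdio.h>

struct node { int id; };

/* path[0] is the root, path[len - 1] is the leaf */
static struct node *converge(struct node **path1, int len1,
                             struct node **path2, int len2)
{
    int i = len1 - 1, j = len2 - 1;

    while (i >= 0 && j >= 0 && path1[i] != path2[j]) {
        int d1 = i, d2 = j;

        /* step whichever cursor is at least as deep as the other, so
         * both advance in the same iteration once the depths match */
        if (d1 >= d2)
            i--;
        if (d2 >= d1)
            j--;
    }
    return (i >= 0 && j >= 0) ? path1[i] : NULL;
}

int main(void)
{
    struct node r = {1}, a = {2}, b = {3}, c = {4}, d = {5};
    struct node *p1[] = { &r, &a, &b, &d };
    struct node *p2[] = { &r, &a, &c };
    struct node *m = converge(p1, 4, p2, 3);

    printf("converged on node %d\n", m ? m->id : -1);   /* prints 2 */
    return 0;
}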
*/ - if (partial2 > chain2) { + if (partial > chain && depth <= depth2) { + ext4_free_branches(handle, inode, partial->bh, + partial->p + 1, + (__le32 *)partial->bh->b_data+addr_per_block, + (chain+n-1) - partial); + BUFFER_TRACE(partial->bh, "call brelse"); + brelse(partial->bh); + partial--; + } + if (partial2 > chain2 && depth2 <= depth) { ext4_free_branches(handle, inode, partial2->bh, (__le32 *)partial2->bh->b_data, partial2->p, - (chain2+n-1) - partial2); + (chain2+n2-1) - partial2); BUFFER_TRACE(partial2->bh, "call brelse"); brelse(partial2->bh); partial2--; } } + return 0; do_indirects: /* Kill the remaining (whole) subtrees */ diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 1685b82a9ccd..7cfb905a1e90 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3228,7 +3228,7 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, } else nfs4_free_openowner(&oo->oo_owner); spin_unlock(&clp->cl_lock); - return oo; + return ret; } static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { @@ -5065,7 +5065,7 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, } else nfs4_free_lockowner(&lo->lo_owner); spin_unlock(&clp->cl_lock); - return lo; + return ret; } static void diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 3950693dd0f6..7e5aa3908a84 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2374,10 +2374,14 @@ out_dio: /* buffered aio wouldn't have proper lock coverage today */ BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); + if (unlikely(written <= 0)) + goto no_sync; + if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || ((file->f_flags & O_DIRECT) && !direct_io)) { - ret = filemap_fdatawrite_range(file->f_mapping, *ppos, - *ppos + count - 1); + ret = filemap_fdatawrite_range(file->f_mapping, + iocb->ki_pos - written, + iocb->ki_pos - 1); if (ret < 0) written = ret; @@ -2388,10 +2392,12 @@ out_dio: } if (!ret) - ret = filemap_fdatawait_range(file->f_mapping, *ppos, - *ppos + count - 1); + ret = filemap_fdatawait_range(file->f_mapping, + iocb->ki_pos - written, + iocb->ki_pos - 1); } +no_sync: /* * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io * function pointer which is called when o_direct io completes so that diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 13e974e6a889..3dd03af5310d 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -360,7 +360,8 @@ STATIC int /* error (positive) */ xfs_zero_last_block( struct xfs_inode *ip, xfs_fsize_t offset, - xfs_fsize_t isize) + xfs_fsize_t isize, + bool *did_zeroing) { struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize); @@ -388,6 +389,7 @@ xfs_zero_last_block( zero_len = mp->m_sb.sb_blocksize - zero_offset; if (isize + zero_len > offset) zero_len = offset - isize; + *did_zeroing = true; return xfs_iozero(ip, isize, zero_len); } @@ -406,7 +408,8 @@ int /* error (positive) */ xfs_zero_eof( struct xfs_inode *ip, xfs_off_t offset, /* starting I/O offset */ - xfs_fsize_t isize) /* current inode size */ + xfs_fsize_t isize, /* current inode size */ + bool *did_zeroing) { struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t start_zero_fsb; @@ -428,7 +431,7 @@ xfs_zero_eof( * We only zero a part of that block so it is handled specially. 
*/ if (XFS_B_FSB_OFFSET(mp, isize) != 0) { - error = xfs_zero_last_block(ip, offset, isize); + error = xfs_zero_last_block(ip, offset, isize, did_zeroing); if (error) return error; } @@ -488,6 +491,7 @@ xfs_zero_eof( if (error) return error; + *did_zeroing = true; start_zero_fsb = imap.br_startoff + imap.br_blockcount; ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); } @@ -526,13 +530,15 @@ restart: * having to redo all checks before. */ if (*pos > i_size_read(inode)) { + bool zero = false; + if (*iolock == XFS_IOLOCK_SHARED) { xfs_rw_iunlock(ip, *iolock); *iolock = XFS_IOLOCK_EXCL; xfs_rw_ilock(ip, *iolock); goto restart; } - error = xfs_zero_eof(ip, *pos, i_size_read(inode)); + error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero); if (error) return error; } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 4ed2ba9342dc..9176c4e728e1 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -377,8 +377,9 @@ int xfs_droplink(struct xfs_trans *, struct xfs_inode *); int xfs_bumplink(struct xfs_trans *, struct xfs_inode *); /* from xfs_file.c */ -int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t); -int xfs_iozero(struct xfs_inode *, loff_t, size_t); +int xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset, + xfs_fsize_t isize, bool *did_zeroing); +int xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count); #define IHOLD(ip) \ diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index c50311cae1b1..17d057c8eb43 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -741,6 +741,7 @@ xfs_setattr_size( int error; uint lock_flags = 0; uint commit_flags = 0; + bool did_zeroing = false; trace_xfs_setattr(ip); @@ -784,20 +785,16 @@ xfs_setattr_size( return error; /* - * Now we can make the changes. Before we join the inode to the - * transaction, take care of the part of the truncation that must be - * done without the inode lock. This needs to be done before joining - * the inode to the transaction, because the inode cannot be unlocked - * once it is a part of the transaction. + * File data changes must be complete before we start the transaction to + * modify the inode. This needs to be done before joining the inode to + * the transaction because the inode cannot be unlocked once it is a + * part of the transaction. + * + * Start with zeroing any data block beyond EOF that we may expose on + * file extension. */ if (newsize > oldsize) { - /* - * Do the first part of growing a file: zero any data in the - * last block that is beyond the old EOF. We need to do this - * before the inode is joined to the transaction to modify - * i_size. - */ - error = xfs_zero_eof(ip, newsize, oldsize); + error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing); if (error) return error; } @@ -807,23 +804,18 @@ xfs_setattr_size( * any previous writes that are beyond the on disk EOF and the new * EOF that have not been written out need to be written here. If we * do not write the data out, we expose ourselves to the null files - * problem. - * - * Only flush from the on disk size to the smaller of the in memory - * file size or the new size as that's the range we really care about - * here and prevents waiting for other data not within the range we - * care about here. + * problem. Note that this includes any block zeroing we did above; + * otherwise those blocks may not be zeroed after a crash. 
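The reasoning in the comment above is that zero-filled blocks written while extending the file must reach disk before the new size is committed; the did_zeroing out-parameter is what lets the caller make that flush decision. A hedged standalone sketch of the pattern, with stub types and a printf standing in for the flush (none of these names are the XFS API):

#include <stdbool.h>
#include <stdio.h>

struct inode_stub {
    long long mem_size;     /* in-memory file size */
    long long disk_size;    /* size currently recorded on disk */
};

/* zero [mem_size, newsize) when the file grows; report whether we wrote */
static int zero_eof_range(struct inode_stub *ip, long long newsize,
                          bool *did_zeroing)
{
    if (newsize > ip->mem_size) {
        /* zero-filling I/O would happen here */
        *did_zeroing = true;
    }
    return 0;
}

static int set_size(struct inode_stub *ip, long long newsize)
{
    long long oldsize = ip->mem_size;
    bool did_zeroing = false;
    int error = zero_eof_range(ip, newsize, &did_zeroing);

    if (error)
        return error;

    /* flush before committing the new size if dirty data sits beyond
     * the on-disk size, including any blocks zeroed just above */
    if (newsize > ip->disk_size &&
        (oldsize != ip->disk_size || did_zeroing))
        printf("flush range [%lld, %lld)\n", ip->disk_size, newsize);

    ip->mem_size = newsize;
    return 0;
}

int main(void)
{
    struct inode_stub ip = { .mem_size = 4096, .disk_size = 4096 };

    return set_size(&ip, 10000);
}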
*/ - if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) { + if (newsize > ip->i_d.di_size && + (oldsize != ip->i_d.di_size || did_zeroing)) { error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ip->i_d.di_size, newsize); if (error) return error; } - /* - * Wait for all direct I/O to complete. - */ + /* Now wait for all direct I/O to complete. */ inode_dio_wait(inode); /* diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index c294e3e25e37..a1b25e35ea5f 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -181,7 +181,9 @@ enum rq_flag_bits { __REQ_ELVPRIV, /* elevator private data attached */ __REQ_FAILED, /* set if the request failed */ __REQ_QUIET, /* don't worry about errors */ - __REQ_PREEMPT, /* set for "ide_preempt" requests */ + __REQ_PREEMPT, /* set for "ide_preempt" requests and also + for requests for which the SCSI "quiesce" + state must be ignored. */ __REQ_ALLOCED, /* request came from our alloc pool */ __REQ_COPY_USER, /* contains copies of user pages */ __REQ_FLUSH_SEQ, /* request for flush sequence */ diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index ab70f3bc44ad..948df62ee5b7 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -68,7 +68,6 @@ struct cpuidle_device { unsigned int cpu; int last_residency; - int state_count; struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; struct cpuidle_driver_kobj *kobj_driver; diff --git a/include/linux/lcm.h b/include/linux/lcm.h index 7bf01d779b45..1ce79a7f1daa 100644 --- a/include/linux/lcm.h +++ b/include/linux/lcm.h @@ -4,5 +4,6 @@ #include unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__; +unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__; #endif /* _LCM_H */ diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index c57d8ea0716c..59a7889e15db 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h @@ -60,17 +60,17 @@ struct rpc_xprt; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) void rpc_register_sysctl(void); void rpc_unregister_sysctl(void); -int sunrpc_debugfs_init(void); +void sunrpc_debugfs_init(void); void sunrpc_debugfs_exit(void); -int rpc_clnt_debugfs_register(struct rpc_clnt *); +void rpc_clnt_debugfs_register(struct rpc_clnt *); void rpc_clnt_debugfs_unregister(struct rpc_clnt *); -int rpc_xprt_debugfs_register(struct rpc_xprt *); +void rpc_xprt_debugfs_register(struct rpc_xprt *); void rpc_xprt_debugfs_unregister(struct rpc_xprt *); #else -static inline int +static inline void sunrpc_debugfs_init(void) { - return 0; + return; } static inline void @@ -79,10 +79,10 @@ sunrpc_debugfs_exit(void) return; } -static inline int +static inline void rpc_clnt_debugfs_register(struct rpc_clnt *clnt) { - return 0; + return; } static inline void @@ -91,10 +91,10 @@ rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt) return; } -static inline int +static inline void rpc_xprt_debugfs_register(struct rpc_xprt *xprt) { - return 0; + return; } static inline void diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 0c40c16174b4..a314afdbeb5c 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -955,25 +955,6 @@ static void mark_nosave_pages(struct memory_bitmap *bm) } } -static bool is_nosave_page(unsigned long pfn) -{ - struct nosave_region *region; - - list_for_each_entry(region, &nosave_regions, list) { - if (pfn >= region->start_pfn && pfn < region->end_pfn) { - 
pr_err("PM: %#010llx in e820 nosave region: " - "[mem %#010llx-%#010llx]\n", - (unsigned long long) pfn << PAGE_SHIFT, - (unsigned long long) region->start_pfn << PAGE_SHIFT, - ((unsigned long long) region->end_pfn << PAGE_SHIFT) - - 1); - return true; - } - } - - return false; -} - /** * create_basic_memory_bitmaps - create bitmaps needed for marking page * frames that should not be saved and free page frames. The pointers @@ -2039,7 +2020,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm) do { pfn = memory_bm_next_pfn(bm); if (likely(pfn != BM_END_OF_MAP)) { - if (likely(pfn_valid(pfn)) && !is_nosave_page(pfn)) + if (likely(pfn_valid(pfn))) swsusp_set_page_free(pfn_to_page(pfn)); else return -EFAULT; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 44dfc8b46bd0..d400c827a94e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3079,6 +3079,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio) } else { if (dl_prio(oldprio)) p->dl.dl_boosted = 0; + if (rt_prio(oldprio)) + p->rt.timeout = 0; p->sched_class = &fair_sched_class; } diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c index eb682d5c697c..6aac4beedbbe 100644 --- a/kernel/time/tick-broadcast-hrtimer.c +++ b/kernel/time/tick-broadcast-hrtimer.c @@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode, */ static int bc_set_next(ktime_t expires, struct clock_event_device *bc) { + int bc_moved; /* * We try to cancel the timer first. If the callback is on * flight on some other cpu then we let it handle it. If we @@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc) * restart the timer because we are in the callback, but we * can set the expiry time and let the callback return * HRTIMER_RESTART. + * + * Since we are in the idle loop at this point and because + * hrtimer_{start/cancel} functions call into tracing, + * calls to these functions must be bound within RCU_NONIDLE. */ - if (hrtimer_try_to_cancel(&bctimer) >= 0) { - hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED); + RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ? + !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) : + 0); + if (bc_moved) { /* Bind the "device" to the cpu */ bc->bound_on = smp_processor_id(); } else if (bc->bound_on == smp_processor_id()) { diff --git a/lib/lcm.c b/lib/lcm.c index 51cc6b13cd52..399643c20880 100644 --- a/lib/lcm.c +++ b/lib/lcm.c @@ -12,3 +12,14 @@ unsigned long lcm(unsigned long a, unsigned long b) return 0; } EXPORT_SYMBOL_GPL(lcm); + +unsigned long lcm_not_zero(unsigned long a, unsigned long b) +{ + unsigned long l = lcm(a, b); + + if (l) + return l; + + return (b ? 
: a); +} +EXPORT_SYMBOL_GPL(lcm_not_zero); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9fab10795bea..65842d688b7c 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1092,6 +1092,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) return NULL; arch_refresh_nodedata(nid, pgdat); + } else { + /* Reset the nr_zones and classzone_idx to 0 before reuse */ + pgdat->nr_zones = 0; + pgdat->classzone_idx = 0; } /* we can use NODE_DATA(nid) from here */ @@ -1977,15 +1981,6 @@ void try_offline_node(int nid) if (is_vmalloc_addr(zone->wait_table)) vfree(zone->wait_table); } - - /* - * Since there is no way to guarentee the address of pgdat/zone is not - * on stack of any kernel threads or used by other kernel objects - * without reference counting or other symchronizing method, do not - * reset node_data and free pgdat here. Just reset it to 0 and reuse - * the memory when the node is online again. - */ - memset(pgdat, 0, sizeof(*pgdat)); } EXPORT_SYMBOL(try_offline_node); diff --git a/mm/mmap.c b/mm/mmap.c index e5cc3ca1d869..0bc66f1e9637 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -780,10 +780,8 @@ again: remove_next = 1 + (end > next->vm_end); importer->anon_vma = exporter->anon_vma; error = anon_vma_clone(importer, exporter); - if (error) { - importer->anon_vma = NULL; + if (error) return error; - } } } diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 6f4335238e33..f24d4c9217a7 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi, * bw * elapsed + write_bandwidth * (period - elapsed) * write_bandwidth = --------------------------------------------------- * period + * + * @written may have decreased due to account_page_redirty(). + * Avoid underflowing @bw calculation. */ - bw = written - bdi->written_stamp; + bw = written - min(written, bdi->written_stamp); bw *= HZ; if (unlikely(elapsed > period)) { do_div(bw, elapsed); @@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh, unsigned long now) { static DEFINE_SPINLOCK(dirty_lock); - static unsigned long update_time; + static unsigned long update_time = INITIAL_JIFFIES; /* * check locklessly first to optimize away locking for the most time diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 72f5ac381ab3..755a42c76eb4 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -103,6 +103,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype) if (!is_migrate_isolate_page(buddy)) { __isolate_free_page(page, order); + kernel_map_pages(page, (1 << order), 1); set_page_refcounted(page); isolated_page = page; } diff --git a/mm/rmap.c b/mm/rmap.c index 71cd5bd0c17d..ecb444a2ad50 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -287,6 +287,13 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) return 0; enomem_failure: + /* + * dst->anon_vma is dropped here otherwise its degree can be incorrectly + * decremented in unlink_anon_vmas(). + * We can safely do this because callers of anon_vma_clone() don't care + * about dst->anon_vma if anon_vma_clone() failed. 
+ */ + dst->anon_vma = NULL; unlink_anon_vmas(dst); return -ENOMEM; } diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 33a2f201e460..74d30ec7b234 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -484,7 +484,7 @@ static int ceph_tcp_connect(struct ceph_connection *con) IPPROTO_TCP, &sock); if (ret) return ret; - sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC; + sock->sk->sk_allocation = GFP_NOFS; #ifdef CONFIG_LOCKDEP lockdep_set_class(&sock->sk->sk_lock, &socket_class); @@ -510,8 +510,6 @@ static int ceph_tcp_connect(struct ceph_connection *con) return ret; } - sk_set_memalloc(sock->sk); - con->sock = sock; return 0; } @@ -2798,11 +2796,8 @@ static void con_work(struct work_struct *work) { struct ceph_connection *con = container_of(work, struct ceph_connection, work.work); - unsigned long pflags = current->flags; bool fault; - current->flags |= PF_MEMALLOC; - mutex_lock(&con->mutex); while (true) { int ret; @@ -2856,8 +2851,6 @@ static void con_work(struct work_struct *work) con_fault_finish(con); con->ops->put(con); - - tsk_restore_flags(current, pflags, PF_MEMALLOC); } /* diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index a48bad468880..7702978a4c99 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -49,8 +49,6 @@ static void ieee80211_free_tid_rx(struct rcu_head *h) container_of(h, struct tid_ampdu_rx, rcu_head); int i; - del_timer_sync(&tid_rx->reorder_timer); - for (i = 0; i < tid_rx->buf_size; i++) __skb_queue_purge(&tid_rx->reorder_buf[i]); kfree(tid_rx->reorder_buf); @@ -93,6 +91,12 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, del_timer_sync(&tid_rx->session_timer); + /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */ + spin_lock_bh(&tid_rx->reorder_lock); + tid_rx->removed = true; + spin_unlock_bh(&tid_rx->reorder_lock); + del_timer_sync(&tid_rx->reorder_timer); + call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index b448e8ff3213..909913d34d28 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -870,9 +870,10 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, set_release_timer: - mod_timer(&tid_agg_rx->reorder_timer, - tid_agg_rx->reorder_time[j] + 1 + - HT_RX_REORDER_BUF_TIMEOUT); + if (!tid_agg_rx->removed) + mod_timer(&tid_agg_rx->reorder_timer, + tid_agg_rx->reorder_time[j] + 1 + + HT_RX_REORDER_BUF_TIMEOUT); } else { del_timer(&tid_agg_rx->reorder_timer); } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 4f052bb2a5ad..075fd7858f2f 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -175,6 +175,7 @@ struct tid_ampdu_tx { * @reorder_lock: serializes access to reorder buffer, see below. * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and * and ssn. + * @removed: this session is removed (but might have been found due to RCU) * * This structure's lifetime is managed by RCU, assignments to * the array holding it must hold the aggregation mutex. 
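The @removed flag documented above is checked under reorder_lock before the release path re-arms the reorder timer, so teardown can publish it under the same lock and then delete the timer without it being re-queued behind its back. A minimal userspace sketch of that ordering, with a pthread mutex and a plain flag standing in for the spinlock and timer (illustrative only, not the mac80211 code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct session {
    pthread_mutex_t lock;
    bool removed;
    bool timer_armed;   /* stands in for the kernel timer */
};

/* data path: only re-arm while the session is still live */
static void maybe_rearm(struct session *s)
{
    pthread_mutex_lock(&s->lock);
    if (!s->removed)
        s->timer_armed = true;      /* mod_timer() in the driver */
    pthread_mutex_unlock(&s->lock);
}

/* teardown: once the flag is published under the lock, no new arming
 * can happen, so cancelling the timer afterwards is race-free */
static void stop_session(struct session *s)
{
    pthread_mutex_lock(&s->lock);
    s->removed = true;
    pthread_mutex_unlock(&s->lock);
    s->timer_armed = false;         /* del_timer_sync() in the driver */
}

int main(void)
{
    struct session s = { PTHREAD_MUTEX_INITIALIZER, false, false };

    maybe_rearm(&s);
    stop_session(&s);
    maybe_rearm(&s);                /* no-op: session already removed */
    printf("armed=%d removed=%d\n", s.timer_armed, s.removed);  /* 0 1 */
    return 0;
}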
@@ -199,6 +200,7 @@ struct tid_ampdu_rx { u16 timeout; u8 dialog_token; bool auto_seq; + bool removed; }; /** diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 3f5d4d48f0cb..86e6cc5c0953 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -303,9 +303,7 @@ static int rpc_client_register(struct rpc_clnt *clnt, struct super_block *pipefs_sb; int err; - err = rpc_clnt_debugfs_register(clnt); - if (err) - return err; + rpc_clnt_debugfs_register(clnt); pipefs_sb = rpc_get_sb_net(net); if (pipefs_sb) { diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c index e811f390f9f6..82962f7e6e88 100644 --- a/net/sunrpc/debugfs.c +++ b/net/sunrpc/debugfs.c @@ -129,48 +129,52 @@ static const struct file_operations tasks_fops = { .release = tasks_release, }; -int +void rpc_clnt_debugfs_register(struct rpc_clnt *clnt) { - int len, err; + int len; char name[24]; /* enough for "../../rpc_xprt/ + 8 hex digits + NULL */ + struct rpc_xprt *xprt; /* Already registered? */ - if (clnt->cl_debugfs) - return 0; + if (clnt->cl_debugfs || !rpc_clnt_dir) + return; len = snprintf(name, sizeof(name), "%x", clnt->cl_clid); if (len >= sizeof(name)) - return -EINVAL; + return; /* make the per-client dir */ clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir); if (!clnt->cl_debugfs) - return -ENOMEM; + return; /* make tasks file */ - err = -ENOMEM; if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs, clnt, &tasks_fops)) goto out_err; - err = -EINVAL; rcu_read_lock(); + xprt = rcu_dereference(clnt->cl_xprt); + /* no "debugfs" dentry? Don't bother with the symlink. */ + if (!xprt->debugfs) { + rcu_read_unlock(); + return; + } len = snprintf(name, sizeof(name), "../../rpc_xprt/%s", - rcu_dereference(clnt->cl_xprt)->debugfs->d_name.name); + xprt->debugfs->d_name.name); rcu_read_unlock(); + if (len >= sizeof(name)) goto out_err; - err = -ENOMEM; if (!debugfs_create_symlink("xprt", clnt->cl_debugfs, name)) goto out_err; - return 0; + return; out_err: debugfs_remove_recursive(clnt->cl_debugfs); clnt->cl_debugfs = NULL; - return err; } void @@ -226,33 +230,33 @@ static const struct file_operations xprt_info_fops = { .release = xprt_info_release, }; -int +void rpc_xprt_debugfs_register(struct rpc_xprt *xprt) { int len, id; static atomic_t cur_id; char name[9]; /* 8 hex digits + NULL term */ + if (!rpc_xprt_dir) + return; + id = (unsigned int)atomic_inc_return(&cur_id); len = snprintf(name, sizeof(name), "%x", id); if (len >= sizeof(name)) - return -EINVAL; + return; /* make the per-client dir */ xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir); if (!xprt->debugfs) - return -ENOMEM; + return; /* make tasks file */ if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs, xprt, &xprt_info_fops)) { debugfs_remove_recursive(xprt->debugfs); xprt->debugfs = NULL; - return -ENOMEM; } - - return 0; } void @@ -266,14 +270,17 @@ void __exit sunrpc_debugfs_exit(void) { debugfs_remove_recursive(topdir); + topdir = NULL; + rpc_clnt_dir = NULL; + rpc_xprt_dir = NULL; } -int __init +void __init sunrpc_debugfs_init(void) { topdir = debugfs_create_dir("sunrpc", NULL); if (!topdir) - goto out; + return; rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir); if (!rpc_clnt_dir) @@ -283,10 +290,9 @@ sunrpc_debugfs_init(void) if (!rpc_xprt_dir) goto out_remove; - return 0; + return; out_remove: debugfs_remove_recursive(topdir); topdir = NULL; -out: - return -ENOMEM; + rpc_clnt_dir = NULL; } diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index e37fbed87956..ee5d3d253102 100644 --- 
a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c @@ -98,10 +98,7 @@ init_sunrpc(void) if (err) goto out4; - err = sunrpc_debugfs_init(); - if (err) - goto out5; - + sunrpc_debugfs_init(); #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) rpc_register_sysctl(); #endif @@ -109,8 +106,6 @@ init_sunrpc(void) init_socket_xprt(); /* clnt sock transport */ return 0; -out5: - unregister_rpc_pipefs(); out4: unregister_pernet_subsys(&sunrpc_net_ops); out3: diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index ebbefad21a37..5dcf7eac30a6 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1303,7 +1303,6 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net) */ struct rpc_xprt *xprt_create_transport(struct xprt_create *args) { - int err; struct rpc_xprt *xprt; struct xprt_class *t; @@ -1344,11 +1343,7 @@ found: return ERR_PTR(-ENOMEM); } - err = rpc_xprt_debugfs_register(xprt); - if (err) { - xprt_destroy(xprt); - return ERR_PTR(err); - } + rpc_xprt_debugfs_register(xprt); dprintk("RPC: created transport %p with %u slots\n", xprt, xprt->max_reqs); diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 33db1ad4fd10..138949a31eab 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf, goto out; /* No partial writes. */ - length = EINVAL; + length = -EINVAL; if (*ppos != 0) goto out; diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c index a422aaa3bb0c..9ee25a63f684 100644 --- a/sound/firewire/bebob/bebob_maudio.c +++ b/sound/firewire/bebob/bebob_maudio.c @@ -96,10 +96,10 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit) struct fw_device *device = fw_parent_device(unit); int err, rcode; u64 date; - __be32 cues[3] = { - MAUDIO_BOOTLOADER_CUE1, - MAUDIO_BOOTLOADER_CUE2, - MAUDIO_BOOTLOADER_CUE3 + __le32 cues[3] = { + cpu_to_le32(MAUDIO_BOOTLOADER_CUE1), + cpu_to_le32(MAUDIO_BOOTLOADER_CUE2), + cpu_to_le32(MAUDIO_BOOTLOADER_CUE3) }; /* check date of software used to build */ diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index d3e2fc700c5d..172c89996d74 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -394,7 +394,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on) { /* We currently only handle front, HP */ static hda_nid_t pins[] = { - 0x0f, 0x10, 0x14, 0x15, 0 + 0x0f, 0x10, 0x14, 0x15, 0x17, 0 }; hda_nid_t *p; for (p = pins; *p; p++) @@ -2910,6 +2910,8 @@ static void alc283_init(struct hda_codec *codec) if (!hp_pin) return; + + msleep(30); hp_pin_sense = snd_hda_jack_detect(codec, hp_pin); /* Index 0x43 Direct Drive HP AMP LPM Control 1 */ @@ -4289,6 +4291,7 @@ enum { ALC269_FIXUP_QUANTA_MUTE, ALC269_FIXUP_LIFEBOOK, ALC269_FIXUP_LIFEBOOK_EXTMIC, + ALC269_FIXUP_LIFEBOOK_HP_PIN, ALC269_FIXUP_AMIC, ALC269_FIXUP_DMIC, ALC269VB_FIXUP_AMIC, @@ -4440,6 +4443,13 @@ static const struct hda_fixup alc269_fixups[] = { { } }, }, + [ALC269_FIXUP_LIFEBOOK_HP_PIN] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x21, 0x0221102f }, /* HP out */ + { } + }, + }, [ALC269_FIXUP_AMIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -4915,6 +4925,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX), SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", 
ALC269_FIXUP_LIFEBOOK), + SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC), @@ -4941,6 +4952,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index dc9df007d3e3..337c317ead6f 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -192,6 +192,7 @@ static const struct rc_config { { USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 24-bit */ { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */ { USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ + { USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */ }; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index ec83b11c5978..0fba701bc518 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -466,7 +466,7 @@ static struct kvm *kvm_create_vm(unsigned long type) BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); r = -ENOMEM; - kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); + kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots)); if (!kvm->memslots) goto out_err_no_srcu; @@ -517,7 +517,7 @@ out_err_no_srcu: out_err_no_disable: for (i = 0; i < KVM_NR_BUSES; i++) kfree(kvm->buses[i]); - kfree(kvm->memslots); + kvfree(kvm->memslots); kvm_arch_free_vm(kvm); return ERR_PTR(r); } @@ -573,7 +573,7 @@ static void kvm_free_physmem(struct kvm *kvm) kvm_for_each_memslot(memslot, slots) kvm_free_physmem_slot(kvm, memslot, NULL); - kfree(kvm->memslots); + kvfree(kvm->memslots); } static void kvm_destroy_devices(struct kvm *kvm) @@ -865,10 +865,10 @@ int __kvm_set_memory_region(struct kvm *kvm, goto out_free; } - slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), - GFP_KERNEL); + slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); if (!slots) goto out_free; + memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { slot = id_to_memslot(slots, mem->slot); @@ -911,7 +911,7 @@ int __kvm_set_memory_region(struct kvm *kvm, kvm_arch_commit_memory_region(kvm, mem, &old, change); kvm_free_physmem_slot(kvm, &old, &new); - kfree(old_memslots); + kvfree(old_memslots); /* * IOMMU mapping: New slots need to be mapped. Old slots need to be @@ -930,7 +930,7 @@ int __kvm_set_memory_region(struct kvm *kvm, return 0; out_slots: - kfree(slots); + kvfree(slots); out_free: kvm_free_physmem_slot(kvm, &new, &old); out: