diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt index e742d21dbd96..a69ffe1d54d5 100644 --- a/Documentation/cpu-freq/intel-pstate.txt +++ b/Documentation/cpu-freq/intel-pstate.txt @@ -15,10 +15,13 @@ New sysfs files for controlling P state selection have been added to /sys/devices/system/cpu/intel_pstate/ max_perf_pct: limits the maximum P state that will be requested by - the driver stated as a percentage of the available performance. + the driver stated as a percentage of the available performance. The + available (P states) performance may be reduced by the no_turbo + setting described below. min_perf_pct: limits the minimum P state that will be requested by - the driver stated as a percentage of the available performance. + the driver stated as a percentage of the max (non-turbo) + performance level. no_turbo: limits the driver to selecting P states below the turbo frequency range. diff --git a/Makefile b/Makefile index 13d8f323ae43..7a2981c972ae 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 14 -SUBLEVEL = 12 +SUBLEVEL = 13 EXTRAVERSION = NAME = Remembering Coco diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 9dc5dc39fded..11ad59b856c6 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -56,6 +56,8 @@ #define TASK_SIZE_32 UL(0x100000000) #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ TASK_SIZE_32 : TASK_SIZE_64) +#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ + TASK_SIZE_32 : TASK_SIZE_64) #else #define TASK_SIZE TASK_SIZE_64 #endif /* CONFIG_COMPAT */ diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h index 3be8581af495..a8a37477c66e 100644 --- a/arch/metag/include/asm/processor.h +++ b/arch/metag/include/asm/processor.h @@ -23,7 +23,7 @@ #define STACK_TOP (TASK_SIZE - PAGE_SIZE) #define STACK_TOP_MAX STACK_TOP /* Maximum virtual space for stack */ -#define STACK_SIZE_MAX (1 << 28) /* 256 MB */ +#define STACK_SIZE_MAX (CONFIG_MAX_STACK_SIZE_MB*1024*1024) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 86522ef09d52..d951c9681ab3 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -55,7 +55,10 @@ #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX DEFAULT_TASK_SIZE -#define STACK_SIZE_MAX (1 << 30) /* 1 GB */ +/* Allow bigger stacks for 64-bit processes */ +#define STACK_SIZE_MAX (USER_WIDE_MODE \ + ? (1 << 30) /* 1 GB */ \ + : (CONFIG_MAX_STACK_SIZE_MB*1024*1024)) #endif diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c index 608716f8496b..af3bc359dc70 100644 --- a/arch/parisc/kernel/hardware.c +++ b/arch/parisc/kernel/hardware.c @@ -1210,7 +1210,8 @@ static struct hp_hardware hp_hardware_list[] = { {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, - {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"}, + {HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"}, + {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? 
Core RS-232"}, {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 31ffa9b55322..e1ffea2f9a0b 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -72,10 +72,10 @@ static unsigned long mmap_upper_limit(void) { unsigned long stack_base; - /* Limit stack size to 1GB - see setup_arg_pages() in fs/exec.c */ + /* Limit stack size - see setup_arg_pages() in fs/exec.c */ stack_base = rlimit_max(RLIMIT_STACK); - if (stack_base > (1 << 30)) - stack_base = 1 << 30; + if (stack_base > STACK_SIZE_MAX) + stack_base = STACK_SIZE_MAX; return PAGE_ALIGN(STACK_TOP - stack_base); } diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c index bb9f3b64de55..ec741fe02ab6 100644 --- a/arch/parisc/kernel/sys_parisc32.c +++ b/arch/parisc/kernel/sys_parisc32.c @@ -4,6 +4,7 @@ * Copyright (C) 2000-2001 Hewlett Packard Company * Copyright (C) 2000 John Marvin * Copyright (C) 2001 Matthew Wilcox + * Copyright (C) 2014 Helge Deller * * These routines maintain argument size conversion between 32bit and 64bit * environment. Based heavily on sys_ia32.c and sys_sparc32.c. @@ -57,3 +58,12 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23, current->comm, current->pid, r20); return -ENOSYS; } + +asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags, + compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd, + const char __user * pathname) +{ + return sys_fanotify_mark(fanotify_fd, flags, + ((__u64)mask1 << 32) | mask0, + dfd, pathname); +} diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 83ead0ea127d..7dd8a3b22147 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -418,7 +418,7 @@ ENTRY_SAME(accept4) /* 320 */ ENTRY_SAME(prlimit64) ENTRY_SAME(fanotify_init) - ENTRY_COMP(fanotify_mark) + ENTRY_DIFF(fanotify_mark) ENTRY_COMP(clock_adjtime) ENTRY_SAME(name_to_handle_at) /* 325 */ ENTRY_COMP(open_by_handle_at) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 957bf344c0f5..2156fa2d25fe 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -410,7 +410,7 @@ config KEXEC config CRASH_DUMP bool "Build a kdump crash kernel" depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP) - select RELOCATABLE if PPC64 || 44x || FSL_BOOKE + select RELOCATABLE if (PPC64 && !COMPILE_TEST) || 44x || FSL_BOOKE help Build a kernel suitable for use as a kdump capture kernel. 
The same kernel binary can be used as production kernel and dump @@ -1000,6 +1000,7 @@ endmenu if PPC64 config RELOCATABLE bool "Build a relocatable kernel" + depends on !COMPILE_TEST select NONSTATIC_KERNEL help This builds a kernel image that is capable of running anywhere diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 3fd2f1b6f906..cefc7b4f4fb1 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -60,8 +60,7 @@ struct power_pmu { #define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */ #define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */ #define PPMU_HAS_SIER 0x00000040 /* Has SIER */ -#define PPMU_BHRB 0x00000080 /* has BHRB feature enabled */ -#define PPMU_EBB 0x00000100 /* supports event based branch */ +#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */ /* * Values for flags to get_alternatives() diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 67cf22083f4c..38265dc85318 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -483,7 +483,7 @@ static bool is_ebb_event(struct perf_event *event) * check that the PMU supports EBB, meaning those that don't can still * use bit 63 of the event code for something else if they wish. */ - return (ppmu->flags & PPMU_EBB) && + return (ppmu->flags & PPMU_ARCH_207S) && ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1); } @@ -851,7 +851,22 @@ static void power_pmu_read(struct perf_event *event) } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); local64_add(delta, &event->count); - local64_sub(delta, &event->hw.period_left); + + /* + * A number of places program the PMC with (0x80000000 - period_left). + * We never want period_left to be less than 1 because we will program + * the PMC with a value >= 0x800000000 and an edge detected PMC will + * roll around to 0 before taking an exception. We have seen this + * on POWER8. + * + * To fix this, clamp the minimum value of period_left to 1. 
+ */ + do { + prev = local64_read(&event->hw.period_left); + val = prev - delta; + if (val < 1) + val = 1; + } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev); } /* @@ -1152,6 +1167,9 @@ static void power_pmu_enable(struct pmu *pmu) write_mmcr0(cpuhw, mmcr0); + if (ppmu->flags & PPMU_ARCH_207S) + mtspr(SPRN_MMCR2, 0); + /* * Enable instruction sampling if necessary */ @@ -1548,7 +1566,7 @@ static int power_pmu_event_init(struct perf_event *event) if (has_branch_stack(event)) { /* PMU has BHRB enabled */ - if (!(ppmu->flags & PPMU_BHRB)) + if (!(ppmu->flags & PPMU_ARCH_207S)) return -EOPNOTSUPP; } diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index 96cee20dcd34..4a06530cbb71 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c @@ -751,7 +751,7 @@ static struct power_pmu power8_pmu = { .get_constraint = power8_get_constraint, .get_alternatives = power8_get_alternatives, .disable_pmc = power8_disable_pmc, - .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, + .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S, .n_generic = ARRAY_SIZE(power8_generic_events), .generic_events = power8_generic_events, .cache_events = &power8_cache_events, diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c index f30cd10293f0..8626b03e83b7 100644 --- a/arch/x86/crypto/sha512_ssse3_glue.c +++ b/arch/x86/crypto/sha512_ssse3_glue.c @@ -141,7 +141,7 @@ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out) /* save number of bits */ bits[1] = cpu_to_be64(sctx->count[0] << 3); - bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61; + bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); /* Pad out to 112 mod 128 and append length */ index = sctx->count[0] & 0x7f; diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 799580cabc78..94bd24771812 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -50,6 +50,21 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size, return err; } +static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, + void *arg) +{ + unsigned long i; + + for (i = 0; i < nr_pages; ++i) + if (pfn_valid(start_pfn + i) && + !PageReserved(pfn_to_page(start_pfn + i))) + return 1; + + WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn); + + return 0; +} + /* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses @@ -93,14 +108,11 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, /* * Don't allow anybody to remap normal RAM that we're using.. 
*/ + pfn = phys_addr >> PAGE_SHIFT; last_pfn = last_addr >> PAGE_SHIFT; - for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) { - int is_ram = page_is_ram(pfn); - - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn))) - return NULL; - WARN_ON_ONCE(is_ram); - } + if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, + __ioremap_check_ram) == 1) + return NULL; /* * Mappings have to be page-aligned diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 7c1f8452918a..0bebc6905383 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -30,6 +30,10 @@ #include #include #include +#ifdef CONFIG_ACPI_PROCFS_POWER +#include +#include +#endif #include #include #include @@ -51,6 +55,7 @@ MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI AC Adapter Driver"); MODULE_LICENSE("GPL"); + static int acpi_ac_add(struct acpi_device *device); static int acpi_ac_remove(struct acpi_device *device); static void acpi_ac_notify(struct acpi_device *device, u32 event); @@ -66,6 +71,13 @@ static int acpi_ac_resume(struct device *dev); #endif static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); +#ifdef CONFIG_ACPI_PROCFS_POWER +extern struct proc_dir_entry *acpi_lock_ac_dir(void); +extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir); +static int acpi_ac_open_fs(struct inode *inode, struct file *file); +#endif + + static int ac_sleep_before_get_state_ms; static struct acpi_driver acpi_ac_driver = { @@ -89,6 +101,16 @@ struct acpi_ac { #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger) +#ifdef CONFIG_ACPI_PROCFS_POWER +static const struct file_operations acpi_ac_fops = { + .owner = THIS_MODULE, + .open = acpi_ac_open_fs, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + /* -------------------------------------------------------------------------- AC Adapter Management -------------------------------------------------------------------------- */ @@ -141,6 +163,83 @@ static enum power_supply_property ac_props[] = { POWER_SUPPLY_PROP_ONLINE, }; +#ifdef CONFIG_ACPI_PROCFS_POWER +/* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +static struct proc_dir_entry *acpi_ac_dir; + +static int acpi_ac_seq_show(struct seq_file *seq, void *offset) +{ + struct acpi_ac *ac = seq->private; + + + if (!ac) + return 0; + + if (acpi_ac_get_state(ac)) { + seq_puts(seq, "ERROR: Unable to read AC Adapter state\n"); + return 0; + } + + seq_puts(seq, "state: "); + switch (ac->state) { + case ACPI_AC_STATUS_OFFLINE: + seq_puts(seq, "off-line\n"); + break; + case ACPI_AC_STATUS_ONLINE: + seq_puts(seq, "on-line\n"); + break; + default: + seq_puts(seq, "unknown\n"); + break; + } + + return 0; +} + +static int acpi_ac_open_fs(struct inode *inode, struct file *file) +{ + return single_open(file, acpi_ac_seq_show, PDE_DATA(inode)); +} + +static int acpi_ac_add_fs(struct acpi_ac *ac) +{ + struct proc_dir_entry *entry = NULL; + + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded," + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); + if (!acpi_device_dir(ac->device)) { + acpi_device_dir(ac->device) = + proc_mkdir(acpi_device_bid(ac->device), acpi_ac_dir); + if (!acpi_device_dir(ac->device)) + return -ENODEV; + } + + /* 'state' [R] */ + entry = proc_create_data(ACPI_AC_FILE_STATE, + S_IRUGO, acpi_device_dir(ac->device), + &acpi_ac_fops, ac); + if (!entry) + return -ENODEV; + return 0; +} + 
+static int acpi_ac_remove_fs(struct acpi_ac *ac) +{ + + if (acpi_device_dir(ac->device)) { + remove_proc_entry(ACPI_AC_FILE_STATE, + acpi_device_dir(ac->device)); + remove_proc_entry(acpi_device_bid(ac->device), acpi_ac_dir); + acpi_device_dir(ac->device) = NULL; + } + + return 0; +} +#endif + /* -------------------------------------------------------------------------- Driver Model -------------------------------------------------------------------------- */ @@ -221,6 +320,11 @@ static int acpi_ac_add(struct acpi_device *device) goto end; ac->charger.name = acpi_device_bid(device); +#ifdef CONFIG_ACPI_PROCFS_POWER + result = acpi_ac_add_fs(ac); + if (result) + goto end; +#endif ac->charger.type = POWER_SUPPLY_TYPE_MAINS; ac->charger.properties = ac_props; ac->charger.num_properties = ARRAY_SIZE(ac_props); @@ -234,8 +338,12 @@ static int acpi_ac_add(struct acpi_device *device) ac->state ? "on-line" : "off-line"); end: - if (result) + if (result) { +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_ac_remove_fs(ac); +#endif kfree(ac); + } dmi_check_system(ac_dmi_table); return result; @@ -278,6 +386,10 @@ static int acpi_ac_remove(struct acpi_device *device) if (ac->charger.dev) power_supply_unregister(&ac->charger); +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_ac_remove_fs(ac); +#endif + kfree(ac); return 0; @@ -290,9 +402,20 @@ static int __init acpi_ac_init(void) if (acpi_disabled) return -ENODEV; +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_ac_dir = acpi_lock_ac_dir(); + if (!acpi_ac_dir) + return -ENODEV; +#endif + + result = acpi_bus_register_driver(&acpi_ac_driver); - if (result < 0) + if (result < 0) { +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_unlock_ac_dir(acpi_ac_dir); +#endif return -ENODEV; + } return 0; } @@ -300,6 +423,9 @@ static int __init acpi_ac_init(void) static void __exit acpi_ac_exit(void) { acpi_bus_unregister_driver(&acpi_ac_driver); +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_unlock_ac_dir(acpi_ac_dir); +#endif } module_init(acpi_ac_init); module_exit(acpi_ac_exit); diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index efa71d66e8b0..0f004159a317 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #ifdef CONFIG_ACPI_PROCFS_POWER @@ -1070,6 +1071,28 @@ static struct dmi_system_id bat_dmi_table[] = { {}, }; +/* + * Some machines'(E,G Lenovo Z480) ECs are not stable + * during boot up and this causes battery driver fails to be + * probed due to failure of getting battery information + * from EC sometimes. After several retries, the operation + * may work. So add retry code here and 20ms sleep between + * every retries. 
+ */ +static int acpi_battery_update_retry(struct acpi_battery *battery) +{ + int retry, ret; + + for (retry = 5; retry; retry--) { + ret = acpi_battery_update(battery); + if (!ret) + break; + + msleep(20); + } + return ret; +} + static int acpi_battery_add(struct acpi_device *device) { int result = 0; @@ -1088,9 +1111,11 @@ static int acpi_battery_add(struct acpi_device *device) mutex_init(&battery->sysfs_lock); if (acpi_has_method(battery->device->handle, "_BIX")) set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); - result = acpi_battery_update(battery); + + result = acpi_battery_update_retry(battery); if (result) goto fail; + #ifdef CONFIG_ACPI_PROCFS_POWER result = acpi_battery_add_fs(device); #endif diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index ad11ba4a412d..49d89909b4ed 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -78,6 +78,9 @@ enum { EC_FLAGS_BLOCKED, /* Transactions are blocked */ }; +#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */ +#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */ + /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; module_param(ec_delay, uint, 0644); @@ -109,7 +112,7 @@ struct transaction { u8 ri; u8 wlen; u8 rlen; - bool done; + u8 flags; }; struct acpi_ec *boot_ec, *first_ec; @@ -150,60 +153,74 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) outb(data, ec->data_addr); } -static int ec_transaction_done(struct acpi_ec *ec) +static int ec_transaction_completed(struct acpi_ec *ec) { unsigned long flags; int ret = 0; spin_lock_irqsave(&ec->lock, flags); - if (!ec->curr || ec->curr->done) + if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) ret = 1; spin_unlock_irqrestore(&ec->lock, flags); return ret; } -static void start_transaction(struct acpi_ec *ec) +static bool advance_transaction(struct acpi_ec *ec) { - ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; - ec->curr->done = false; - acpi_ec_write_cmd(ec, ec->curr->command); -} - -static void advance_transaction(struct acpi_ec *ec, u8 status) -{ - unsigned long flags; struct transaction *t; + u8 status; + bool wakeup = false; - spin_lock_irqsave(&ec->lock, flags); + pr_debug("===== %s =====\n", in_interrupt() ? 
"IRQ" : "TASK"); + status = acpi_ec_read_status(ec); t = ec->curr; if (!t) - goto unlock; - if (t->wlen > t->wi) { - if ((status & ACPI_EC_FLAG_IBF) == 0) - acpi_ec_write_data(ec, - t->wdata[t->wi++]); - else - goto err; - } else if (t->rlen > t->ri) { - if ((status & ACPI_EC_FLAG_OBF) == 1) { - t->rdata[t->ri++] = acpi_ec_read_data(ec); - if (t->rlen == t->ri) - t->done = true; + goto err; + if (t->flags & ACPI_EC_COMMAND_POLL) { + if (t->wlen > t->wi) { + if ((status & ACPI_EC_FLAG_IBF) == 0) + acpi_ec_write_data(ec, t->wdata[t->wi++]); + else + goto err; + } else if (t->rlen > t->ri) { + if ((status & ACPI_EC_FLAG_OBF) == 1) { + t->rdata[t->ri++] = acpi_ec_read_data(ec); + if (t->rlen == t->ri) { + t->flags |= ACPI_EC_COMMAND_COMPLETE; + wakeup = true; + } + } else + goto err; + } else if (t->wlen == t->wi && + (status & ACPI_EC_FLAG_IBF) == 0) { + t->flags |= ACPI_EC_COMMAND_COMPLETE; + wakeup = true; + } + return wakeup; + } else { + if ((status & ACPI_EC_FLAG_IBF) == 0) { + acpi_ec_write_cmd(ec, t->command); + t->flags |= ACPI_EC_COMMAND_POLL; } else goto err; - } else if (t->wlen == t->wi && - (status & ACPI_EC_FLAG_IBF) == 0) - t->done = true; - goto unlock; + return wakeup; + } err: /* * If SCI bit is set, then don't think it's a false IRQ * otherwise will take a not handled IRQ as a false one. */ - if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI)) - ++t->irq_count; + if (!(status & ACPI_EC_FLAG_SCI)) { + if (in_interrupt() && t) + ++t->irq_count; + } + return wakeup; +} -unlock: - spin_unlock_irqrestore(&ec->lock, flags); +static void start_transaction(struct acpi_ec *ec) +{ + ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; + ec->curr->flags = 0; + (void)advance_transaction(ec); } static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); @@ -228,15 +245,17 @@ static int ec_poll(struct acpi_ec *ec) /* don't sleep with disabled interrupts */ if (EC_FLAGS_MSI || irqs_disabled()) { udelay(ACPI_EC_MSI_UDELAY); - if (ec_transaction_done(ec)) + if (ec_transaction_completed(ec)) return 0; } else { if (wait_event_timeout(ec->wait, - ec_transaction_done(ec), + ec_transaction_completed(ec), msecs_to_jiffies(1))) return 0; } - advance_transaction(ec, acpi_ec_read_status(ec)); + spin_lock_irqsave(&ec->lock, flags); + (void)advance_transaction(ec); + spin_unlock_irqrestore(&ec->lock, flags); } while (time_before(jiffies, delay)); pr_debug("controller reset, restart transaction\n"); spin_lock_irqsave(&ec->lock, flags); @@ -268,23 +287,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, return ret; } -static int ec_check_ibf0(struct acpi_ec *ec) -{ - u8 status = acpi_ec_read_status(ec); - return (status & ACPI_EC_FLAG_IBF) == 0; -} - -static int ec_wait_ibf0(struct acpi_ec *ec) -{ - unsigned long delay = jiffies + msecs_to_jiffies(ec_delay); - /* interrupt wait manually if GPE mode is not active */ - while (time_before(jiffies, delay)) - if (wait_event_timeout(ec->wait, ec_check_ibf0(ec), - msecs_to_jiffies(1))) - return 0; - return -ETIME; -} - static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) { int status; @@ -305,12 +307,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) goto unlock; } } - if (ec_wait_ibf0(ec)) { - pr_err("input buffer is not empty, " - "aborting transaction\n"); - status = -ETIME; - goto end; - } pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n", t->command, t->wdata ? 
t->wdata[0] : 0); /* disable GPE during transaction if storm is detected */ @@ -334,7 +330,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) set_bit(EC_FLAGS_GPE_STORM, &ec->flags); } pr_debug("transaction end\n"); -end: if (ec->global_lock) acpi_release_global_lock(glk); unlock: @@ -634,17 +629,14 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state) static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, u32 gpe_number, void *data) { + unsigned long flags; struct acpi_ec *ec = data; - u8 status = acpi_ec_read_status(ec); - pr_debug("~~~> interrupt, status:0x%02x\n", status); - - advance_transaction(ec, status); - if (ec_transaction_done(ec) && - (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) { + spin_lock_irqsave(&ec->lock, flags); + if (advance_transaction(ec)) wake_up(&ec->wait); - ec_check_sci(ec, acpi_ec_read_status(ec)); - } + spin_unlock_irqrestore(&ec->lock, flags); + ec_check_sci(ec, acpi_ec_read_status(ec)); return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE; } diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 0bdacc5e26a3..2ba8f02ced36 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -77,7 +77,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) switch (ares->type) { case ACPI_RESOURCE_TYPE_MEMORY24: memory24 = &ares->data.memory24; - if (!memory24->address_length) + if (!memory24->minimum && !memory24->address_length) return false; acpi_dev_get_memresource(res, memory24->minimum, memory24->address_length, @@ -85,7 +85,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) break; case ACPI_RESOURCE_TYPE_MEMORY32: memory32 = &ares->data.memory32; - if (!memory32->address_length) + if (!memory32->minimum && !memory32->address_length) return false; acpi_dev_get_memresource(res, memory32->minimum, memory32->address_length, @@ -93,7 +93,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) break; case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: fixed_memory32 = &ares->data.fixed_memory32; - if (!fixed_memory32->address_length) + if (!fixed_memory32->address && !fixed_memory32->address_length) return false; acpi_dev_get_memresource(res, fixed_memory32->address, fixed_memory32->address_length, @@ -150,7 +150,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) switch (ares->type) { case ACPI_RESOURCE_TYPE_IO: io = &ares->data.io; - if (!io->address_length) + if (!io->minimum && !io->address_length) return false; acpi_dev_get_ioresource(res, io->minimum, io->address_length, @@ -158,7 +158,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) break; case ACPI_RESOURCE_TYPE_FIXED_IO: fixed_io = &ares->data.fixed_io; - if (!fixed_io->address_length) + if (!fixed_io->address && !fixed_io->address_length) return false; acpi_dev_get_ioresource(res, fixed_io->address, fixed_io->address_length, diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index 165c2c299e57..d3bffa478eca 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ -155,13 +155,23 @@ static int __init cma_activate_area(struct cma *cma) base_pfn = pfn; for (j = pageblock_nr_pages; j; --j, pfn++) { WARN_ON_ONCE(!pfn_valid(pfn)); + /* + * alloc_contig_range requires the pfn range + * specified to be in the same zone. Make this + * simple by forcing the entire CMA resv range + * to be in the same zone. 
+ */ if (page_zone(pfn_to_page(pfn)) != zone) - return -EINVAL; + goto err; } init_cma_reserved_pageblock(pfn_to_page(base_pfn)); } while (--i); return 0; + +err: + kfree(cma->bitmap); + return -EINVAL; } static struct cma cma_areas[MAX_CMA_AREAS]; diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c index d915707d2ba1..93dcad0c1cbe 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c @@ -138,7 +138,9 @@ static int i8k_smm(struct smm_regs *regs) if (!alloc_cpumask_var(&old_mask, GFP_KERNEL)) return -ENOMEM; cpumask_copy(old_mask, ¤t->cpus_allowed); - set_cpus_allowed_ptr(current, cpumask_of(0)); + rc = set_cpus_allowed_ptr(current, cpumask_of(0)); + if (rc) + goto out; if (smp_processor_id() != 0) { rc = -EBUSY; goto out; diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index 27c83e45eaed..611b936ecffe 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -190,16 +190,13 @@ static int s2mps11_clk_probe(struct platform_device *pdev) goto err_reg; } - s2mps11_clk->lookup = devm_kzalloc(&pdev->dev, - sizeof(struct clk_lookup), GFP_KERNEL); + s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk, + s2mps11_name(s2mps11_clk), NULL); if (!s2mps11_clk->lookup) { ret = -ENOMEM; goto err_lup; } - s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk); - s2mps11_clk->lookup->clk = s2mps11_clk->clk; - clkdev_add(s2mps11_clk->lookup); } diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c index f9b59c7e48e9..9be47a829144 100644 --- a/drivers/clk/qcom/mmcc-msm8960.c +++ b/drivers/clk/qcom/mmcc-msm8960.c @@ -1208,7 +1208,7 @@ static struct clk_branch rot_clk = { static u8 mmcc_pxo_hdmi_map[] = { [P_PXO] = 0, - [P_HDMI_PLL] = 2, + [P_HDMI_PLL] = 3, }; static const char *mmcc_pxo_hdmi[] = { diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c index c2d204315546..125eba86c844 100644 --- a/drivers/clk/spear/spear3xx_clock.c +++ b/drivers/clk/spear/spear3xx_clock.c @@ -211,7 +211,7 @@ static inline void spear310_clk_init(void) { } /* array of all spear 320 clock lookups */ #ifdef CONFIG_MACH_SPEAR320 -#define SPEAR320_CONTROL_REG (soc_config_base + 0x0000) +#define SPEAR320_CONTROL_REG (soc_config_base + 0x0010) #define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018) #define SPEAR320_UARTX_PCLK_MASK 0x1 diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 74945652dd7a..dac58f67307a 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -47,7 +47,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o # LITTLE drivers, so that it is probed last. 
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o -obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o +obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 6d98c37c87ad..ae52c777339d 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -132,6 +132,7 @@ static struct pstate_funcs pstate_funcs; struct perf_limits { int no_turbo; + int turbo_disabled; int max_perf_pct; int min_perf_pct; int32_t max_perf; @@ -291,7 +292,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, if (ret != 1) return -EINVAL; limits.no_turbo = clamp_t(int, input, 0 , 1); - + if (limits.turbo_disabled) { + pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); + limits.no_turbo = limits.turbo_disabled; + } return count; } @@ -361,21 +365,21 @@ static int byt_get_min_pstate(void) { u64 value; rdmsrl(BYT_RATIOS, value); - return (value >> 8) & 0x3F; + return (value >> 8) & 0x7F; } static int byt_get_max_pstate(void) { u64 value; rdmsrl(BYT_RATIOS, value); - return (value >> 16) & 0x3F; + return (value >> 16) & 0x7F; } static int byt_get_turbo_pstate(void) { u64 value; rdmsrl(BYT_TURBO_RATIOS, value); - return value & 0x3F; + return value & 0x7F; } static void byt_set_pstate(struct cpudata *cpudata, int pstate) @@ -385,7 +389,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) u32 vid; val = pstate << 8; - if (limits.no_turbo) + if (limits.no_turbo && !limits.turbo_disabled) val |= (u64)1 << 32; vid_fp = cpudata->vid.min + mul_fp( @@ -409,8 +413,8 @@ static void byt_get_vid(struct cpudata *cpudata) rdmsrl(BYT_VIDS, value); - cpudata->vid.min = int_tofp((value >> 8) & 0x3f); - cpudata->vid.max = int_tofp((value >> 16) & 0x3f); + cpudata->vid.min = int_tofp((value >> 8) & 0x7f); + cpudata->vid.max = int_tofp((value >> 16) & 0x7f); cpudata->vid.ratio = div_fp( cpudata->vid.max - cpudata->vid.min, int_tofp(cpudata->pstate.max_pstate - @@ -452,7 +456,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate) u64 val; val = pstate << 8; - if (limits.no_turbo) + if (limits.no_turbo && !limits.turbo_disabled) val |= (u64)1 << 32; wrmsrl(MSR_IA32_PERF_CTL, val); @@ -705,9 +709,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum) cpu = all_cpu_data[cpunum]; - intel_pstate_get_cpu_pstates(cpu); - cpu->cpu = cpunum; + intel_pstate_get_cpu_pstates(cpu); init_timer_deferrable(&cpu->timer); cpu->timer.function = intel_pstate_timer_func; @@ -750,7 +753,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits.min_perf = int_tofp(1); limits.max_perf_pct = 100; limits.max_perf = int_tofp(1); - limits.no_turbo = 0; + limits.no_turbo = limits.turbo_disabled; return 0; } limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; @@ -790,6 +793,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) { struct cpudata *cpu; int rc; + u64 misc_en; rc = intel_pstate_init_cpu(policy->cpu); if (rc) @@ -797,8 +801,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) cpu = all_cpu_data[policy->cpu]; - if (!limits.no_turbo && - limits.min_perf_pct == 100 && limits.max_perf_pct == 100) + rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); + if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || + cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) { + 
limits.turbo_disabled = 1; + limits.no_turbo = 1; + } + if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100) policy->policy = CPUFREQ_POLICY_PERFORMANCE; else policy->policy = CPUFREQ_POLICY_POWERSAVE; diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 1d80bd3636c5..b512a4ba7569 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -453,8 +453,8 @@ static int caam_jr_probe(struct platform_device *pdev) int error; jrdev = &pdev->dev; - jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), - GFP_KERNEL); + jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr), + GFP_KERNEL); if (!jrpriv) return -ENOMEM; @@ -487,10 +487,8 @@ static int caam_jr_probe(struct platform_device *pdev) /* Now do the platform independent part */ error = caam_jr_init(jrdev); /* now turn on hardware */ - if (error) { - kfree(jrpriv); + if (error) return error; - } jrpriv->dev = jrdev; spin_lock(&driver_data.jr_alloc_lock); diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 28d24caa49f3..3c78b2268209 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) if (base == 0) return 0; + /* make sure we don't clobber the GTT if it's within stolen memory */ + if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) { + struct { + u32 start, end; + } stolen[2] = { + { .start = base, .end = base + dev_priv->gtt.stolen_size, }, + { .start = base, .end = base + dev_priv->gtt.stolen_size, }, + }; + u64 gtt_start, gtt_end; + + gtt_start = I915_READ(PGTBL_CTL); + if (IS_GEN4(dev)) + gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) | + (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28; + else + gtt_start &= PGTBL_ADDRESS_LO_MASK; + gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4; + + if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end) + stolen[0].end = gtt_start; + if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end) + stolen[1].start = gtt_end; + + /* pick the larger of the two chunks */ + if (stolen[0].end - stolen[0].start > + stolen[1].end - stolen[1].start) { + base = stolen[0].start; + dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start; + } else { + base = stolen[1].start; + dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start; + } + + if (stolen[0].start != stolen[1].start || + stolen[0].end != stolen[1].end) { + DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n", + (unsigned long long) gtt_start, + (unsigned long long) gtt_end - 1); + DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n", + base, base + (u32) dev_priv->gtt.stolen_size - 1); + } + } + + /* Verify that nothing else uses this physical address. Stolen * memory should be reserved by the BIOS and hidden from the * kernel. 
So if the region is already marked as busy, something diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a48b7cad6f11..0a3b9386eb43 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -631,6 +631,9 @@ /* * Instruction and interrupt control regs */ +#define PGTBL_CTL 0x02020 +#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */ +#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */ #define PGTBL_ER 0x02024 #define RENDER_RING_BASE 0x02000 #define BSD_RING_BASE 0x04000 diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 9241e96f8502..5fa854c84d62 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -1161,7 +1161,7 @@ static int ci_stop_dpm(struct radeon_device *rdev) tmp &= ~GLOBAL_PWRMGT_EN; WREG32_SMC(GENERAL_PWRMGT, tmp); - tmp = RREG32(SCLK_PWRMGT_CNTL); + tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); tmp &= ~DYNAMIC_PM_EN; WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 971d9339ce80..64108dbc7d45 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -189,7 +189,7 @@ static const u32 evergreen_golden_registers[] = 0x8c1c, 0xffffffff, 0x00001010, 0x28350, 0xffffffff, 0x00000000, 0xa008, 0xffffffff, 0x00010000, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x9508, 0xffffffff, 0x00000002, 0x913c, 0x0000000f, 0x0000000a }; @@ -476,7 +476,7 @@ static const u32 cedar_golden_registers[] = 0x8c1c, 0xffffffff, 0x00001010, 0x28350, 0xffffffff, 0x00000000, 0xa008, 0xffffffff, 0x00010000, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x9508, 0xffffffff, 0x00000002 }; @@ -635,7 +635,7 @@ static const u32 juniper_mgcg_init[] = static const u32 supersumo_golden_registers[] = { 0x5eb4, 0xffffffff, 0x00000002, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x7030, 0xffffffff, 0x00000011, 0x7c30, 0xffffffff, 0x00000011, 0x6104, 0x01000300, 0x00000000, @@ -719,7 +719,7 @@ static const u32 sumo_golden_registers[] = static const u32 wrestler_golden_registers[] = { 0x5eb4, 0xffffffff, 0x00000002, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x7030, 0xffffffff, 0x00000011, 0x7c30, 0xffffffff, 0x00000011, 0x6104, 0x01000300, 0x00000000, diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 72d3616de08e..95b693c11640 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -646,8 +646,10 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) return -EINVAL; } addr = addr & 0xFFFFFFFFFFFFF000ULL; - addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; - addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE; + if (addr != rdev->dummy_page.addr) + addr |= R600_PTE_VALID | R600_PTE_READABLE | + R600_PTE_WRITEABLE; + addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED; writeq(addr, ptr + (i * 8)); return 0; } diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index b5f63f5e22a3..8fcb932a3a55 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2332,12 +2332,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev) pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, ASIC_INTERNAL_MEMORY_SS, 0); - /* disable ss, causes hangs on some cayman boards */ - if (rdev->family == CHIP_CAYMAN) { - pi->sclk_ss = false; - pi->mclk_ss = false; - 
} - if (pi->sclk_ss || pi->mclk_ss) pi->dynamic_ss = true; else diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 05827eccc53a..ce5a9f2584f3 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -319,9 +319,13 @@ static void process_chn_event(u32 relid) */ do { - hv_begin_read(&channel->inbound); + if (read_state) + hv_begin_read(&channel->inbound); channel->onchannel_callback(arg); - bytes_to_read = hv_end_read(&channel->inbound); + if (read_state) + bytes_to_read = hv_end_read(&channel->inbound); + else + bytes_to_read = 0; } while (read_state && (bytes_to_read != 0)); } else { pr_err("no channel callback for relid - %u\n", relid); diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c index 29dd9f746dfa..233b374334ed 100644 --- a/drivers/hwmon/adm1021.c +++ b/drivers/hwmon/adm1021.c @@ -185,7 +185,7 @@ static ssize_t set_temp_max(struct device *dev, struct i2c_client *client = to_i2c_client(dev); struct adm1021_data *data = i2c_get_clientdata(client); long temp; - int err; + int reg_val, err; err = kstrtol(buf, 10, &temp); if (err) @@ -193,10 +193,11 @@ static ssize_t set_temp_max(struct device *dev, temp /= 1000; mutex_lock(&data->update_lock); - data->temp_max[index] = clamp_val(temp, -128, 127); + reg_val = clamp_val(temp, -128, 127); + data->temp_max[index] = reg_val * 1000; if (!read_only) i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index), - data->temp_max[index]); + reg_val); mutex_unlock(&data->update_lock); return count; @@ -210,7 +211,7 @@ static ssize_t set_temp_min(struct device *dev, struct i2c_client *client = to_i2c_client(dev); struct adm1021_data *data = i2c_get_clientdata(client); long temp; - int err; + int reg_val, err; err = kstrtol(buf, 10, &temp); if (err) @@ -218,10 +219,11 @@ static ssize_t set_temp_min(struct device *dev, temp /= 1000; mutex_lock(&data->update_lock); - data->temp_min[index] = clamp_val(temp, -128, 127); + reg_val = clamp_val(temp, -128, 127); + data->temp_min[index] = reg_val * 1000; if (!read_only) i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index), - data->temp_min[index]); + reg_val); mutex_unlock(&data->update_lock); return count; diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c index d19c790e410a..e38115ce0350 100644 --- a/drivers/hwmon/adm1029.c +++ b/drivers/hwmon/adm1029.c @@ -232,6 +232,9 @@ static ssize_t set_fan_div(struct device *dev, /* Update the value */ reg = (reg & 0x3F) | (val << 6); + /* Update the cache */ + data->fan_div[attr->index] = reg; + /* Write value */ i2c_smbus_write_byte_data(client, ADM1029_REG_FAN_DIV[attr->index], reg); diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c index a8a540ca8c34..51c1a5a165ab 100644 --- a/drivers/hwmon/adm1031.c +++ b/drivers/hwmon/adm1031.c @@ -365,6 +365,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr, if (ret) return ret; + val = clamp_val(val, 0, 127000); mutex_lock(&data->update_lock); data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]); adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr), @@ -394,6 +395,7 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr, if (ret) return ret; + val = clamp_val(val, 0, 127000); mutex_lock(&data->update_lock); data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], data->pwm[nr]); @@ -696,7 +698,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, if (ret) return ret; - val = clamp_val(val, -55000, nr == 0 ? 
127750 : 127875); + val = clamp_val(val, -55000, 127000); mutex_lock(&data->update_lock); data->temp_min[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr), @@ -717,7 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, if (ret) return ret; - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); + val = clamp_val(val, -55000, 127000); mutex_lock(&data->update_lock); data->temp_max[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr), @@ -738,7 +740,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr, if (ret) return ret; - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); + val = clamp_val(val, -55000, 127000); mutex_lock(&data->update_lock); data->temp_crit[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr), diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c index eea817296513..9f2be3dd28f3 100644 --- a/drivers/hwmon/amc6821.c +++ b/drivers/hwmon/amc6821.c @@ -704,7 +704,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, get_temp_alarm, NULL, IDX_TEMP1_MAX); static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, get_temp_alarm, NULL, IDX_TEMP1_CRIT); -static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR, +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, get_temp, NULL, IDX_TEMP2_INPUT); static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp, set_temp, IDX_TEMP2_MIN); diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c index 2c137b26acb4..5790246a7e1d 100644 --- a/drivers/hwmon/emc2103.c +++ b/drivers/hwmon/emc2103.c @@ -250,9 +250,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da, if (result < 0) return result; - val = DIV_ROUND_CLOSEST(val, 1000); - if ((val < -63) || (val > 127)) - return -EINVAL; + val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127); mutex_lock(&data->update_lock); data->temp_min[nr] = val; @@ -274,9 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da, if (result < 0) return result; - val = DIV_ROUND_CLOSEST(val, 1000); - if ((val < -63) || (val > 127)) - return -EINVAL; + val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127); mutex_lock(&data->update_lock); data->temp_max[nr] = val; @@ -390,15 +386,14 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da, { struct emc2103_data *data = emc2103_update_device(dev); struct i2c_client *client = to_i2c_client(dev); - long rpm_target; + unsigned long rpm_target; - int result = kstrtol(buf, 10, &rpm_target); + int result = kstrtoul(buf, 10, &rpm_target); if (result < 0) return result; /* Datasheet states 16384 as maximum RPM target (table 3.2) */ - if ((rpm_target < 0) || (rpm_target > 16384)) - return -EINVAL; + rpm_target = clamp_val(rpm_target, 0, 16384); mutex_lock(&data->update_lock); diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 31e786e3999b..63b2bb6bdbbc 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -375,7 +375,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, return -EAGAIN; } } - map_val = chan->channel + TOTAL_CHANNELS; + map_val = adc_dev->channel_step[chan->scan_index]; /* * We check the complete FIFO. 
We programmed just one entry but in case diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 3842ac738f98..db404a0f7e2c 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -32,7 +33,7 @@ struct dm_io_client { struct io { unsigned long error_bits; atomic_t count; - struct task_struct *sleeper; + struct completion *wait; struct dm_io_client *client; io_notify_fn callback; void *context; @@ -121,8 +122,8 @@ static void dec_count(struct io *io, unsigned int region, int error) invalidate_kernel_vmap_range(io->vma_invalidate_address, io->vma_invalidate_size); - if (io->sleeper) - wake_up_process(io->sleeper); + if (io->wait) + complete(io->wait); else { unsigned long r = io->error_bits; @@ -387,6 +388,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, */ volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1]; struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io)); + DECLARE_COMPLETION_ONSTACK(wait); if (num_regions > 1 && (rw & RW_MASK) != WRITE) { WARN_ON(1); @@ -395,7 +397,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, io->error_bits = 0; atomic_set(&io->count, 1); /* see dispatch_io() */ - io->sleeper = current; + io->wait = &wait; io->client = client; io->vma_invalidate_address = dp->vma_invalidate_address; @@ -403,15 +405,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, dispatch_io(rw, num_regions, where, dp, io, 1); - while (1) { - set_current_state(TASK_UNINTERRUPTIBLE); - - if (!atomic_read(&io->count)) - break; - - io_schedule(); - } - set_current_state(TASK_RUNNING); + wait_for_completion_io(&wait); if (error_bits) *error_bits = io->error_bits; @@ -434,7 +428,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, io = mempool_alloc(client->pool, GFP_NOIO); io->error_bits = 0; atomic_set(&io->count, 1); /* see dispatch_io() */ - io->sleeper = NULL; + io->wait = NULL; io->client = client; io->callback = fn; io->context = context; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 8c53b09b9a2c..65ee3a0d4683 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -54,6 +54,8 @@ static void do_deferred_remove(struct work_struct *w); static DECLARE_WORK(deferred_remove_work, do_deferred_remove); +static struct workqueue_struct *deferred_remove_workqueue; + /* * For bio-based dm. * One of these is allocated per bio. 
@@ -283,16 +285,24 @@ static int __init local_init(void) if (r) goto out_free_rq_tio_cache; + deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); + if (!deferred_remove_workqueue) { + r = -ENOMEM; + goto out_uevent_exit; + } + _major = major; r = register_blkdev(_major, _name); if (r < 0) - goto out_uevent_exit; + goto out_free_workqueue; if (!_major) _major = r; return 0; +out_free_workqueue: + destroy_workqueue(deferred_remove_workqueue); out_uevent_exit: dm_uevent_exit(); out_free_rq_tio_cache: @@ -306,6 +316,7 @@ out_free_io_cache: static void local_exit(void) { flush_scheduled_work(); + destroy_workqueue(deferred_remove_workqueue); kmem_cache_destroy(_rq_tio_cache); kmem_cache_destroy(_io_cache); @@ -414,7 +425,7 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode) if (atomic_dec_and_test(&md->open_count) && (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) - schedule_work(&deferred_remove_work); + queue_work(deferred_remove_workqueue, &deferred_remove_work); dm_put(md); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index ed1b6db25b03..be36adf33ab0 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3085,8 +3085,13 @@ static int pci_af_flr(struct pci_dev *dev, int probe) if (probe) return 0; - /* Wait for Transaction Pending bit clean */ - if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP)) + /* + * Wait for Transaction Pending bit to clear. A word-aligned test + * is used, so we use the conrol offset rather than status and shift + * the test bit to match. + */ + if (pci_wait_for_pending(dev, pos + PCI_AF_CTRL, + PCI_AF_STATUS_TP << 8)) goto clear; dev_err(&dev->dev, "transaction is not cleared; " diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 6c738376daff..34d56f7864d6 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -553,8 +553,9 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops, return phy; put_dev: - put_device(&phy->dev); - ida_remove(&phy_ida, phy->id); + put_device(&phy->dev); /* calls phy_release() which frees resources */ + return ERR_PTR(ret); + free_phy: kfree(phy); return ERR_PTR(ret); @@ -738,7 +739,7 @@ static void phy_release(struct device *dev) phy = to_phy(dev); dev_vdbg(dev, "releasing '%s'\n", dev_name(dev)); - ida_remove(&phy_ida, phy->id); + ida_simple_remove(&phy_ida, phy->id); kfree(phy); } diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c index 1ecfe3bd92ac..1cff2a21db67 100644 --- a/drivers/rtc/rtc-puv3.c +++ b/drivers/rtc/rtc-puv3.c @@ -71,7 +71,7 @@ static int puv3_rtc_setpie(struct device *dev, int enabled) { unsigned int tmp; - dev_debug(dev, "%s: pie=%d\n", __func__, enabled); + dev_dbg(dev, "%s: pie=%d\n", __func__, enabled); spin_lock_irq(&puv3_rtc_pie_lock); tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE; @@ -140,7 +140,7 @@ static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) rtc_tm_to_time(tm, &rtcalarm_count); writel(rtcalarm_count, RTC_RTAR); - puv3_rtc_setaie(&dev->dev, alrm->enabled); + puv3_rtc_setaie(dev, alrm->enabled); if (alrm->enabled) enable_irq_wake(puv3_rtc_alarmno); diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c index fdb07199d9c2..1967bee4f076 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c @@ -140,6 +140,12 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon, return NULL; } +static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz) +{ + unsigned long temp; + return tz->ops->get_crit_temp && 
!tz->ops->get_crit_temp(tz, &temp); +} + int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) { struct thermal_hwmon_device *hwmon; @@ -189,21 +195,18 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) if (result) goto free_temp_mem; - if (tz->ops->get_crit_temp) { - unsigned long temperature; - if (!tz->ops->get_crit_temp(tz, &temperature)) { - snprintf(temp->temp_crit.name, - sizeof(temp->temp_crit.name), + if (thermal_zone_crit_temp_valid(tz)) { + snprintf(temp->temp_crit.name, + sizeof(temp->temp_crit.name), "temp%d_crit", hwmon->count); - temp->temp_crit.attr.attr.name = temp->temp_crit.name; - temp->temp_crit.attr.attr.mode = 0444; - temp->temp_crit.attr.show = temp_crit_show; - sysfs_attr_init(&temp->temp_crit.attr.attr); - result = device_create_file(hwmon->device, - &temp->temp_crit.attr); - if (result) - goto unregister_input; - } + temp->temp_crit.attr.attr.name = temp->temp_crit.name; + temp->temp_crit.attr.attr.mode = 0444; + temp->temp_crit.attr.show = temp_crit_show; + sysfs_attr_init(&temp->temp_crit.attr.attr); + result = device_create_file(hwmon->device, + &temp->temp_crit.attr); + if (result) + goto unregister_input; } mutex_lock(&thermal_hwmon_list_lock); @@ -250,7 +253,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) } device_remove_file(hwmon->device, &temp->temp_input.attr); - if (tz->ops->get_crit_temp) + if (thermal_zone_crit_temp_valid(tz)) device_remove_file(hwmon->device, &temp->temp_crit.attr); mutex_lock(&thermal_hwmon_list_lock); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 762e4a5f5ae9..330df5ce435b 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -153,6 +153,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 115662c16dcc..8a3813be1b28 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -720,7 +720,8 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, - { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) }, + { USB_DEVICE(TESTO_VID, TESTO_1_PID) }, + { USB_DEVICE(TESTO_VID, TESTO_3_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) }, @@ -944,6 +945,8 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) }, + /* Infineon Devices */ + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 500474c48f4b..c4777bc6aee0 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -584,6 +584,12 @@ #define RATOC_PRODUCT_ID_USB60F 0xb020 
/* + * Infineon Technologies + */ +#define INFINEON_VID 0x058b +#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ + +/* * Acton Research Corp. */ #define ACTON_VID 0x0647 /* Vendor ID */ @@ -798,7 +804,8 @@ * Submitted by Colin Leroy */ #define TESTO_VID 0x128D -#define TESTO_USB_INTERFACE_PID 0x0001 +#define TESTO_1_PID 0x0001 +#define TESTO_3_PID 0x0003 /* * Mobility Electronics products. diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index e25e8ca09fe2..9da566a3f5c8 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1487,6 +1487,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 3981ff783950..171b9fa0f27a 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -962,10 +962,10 @@ retry: continue; } - if (ei->i_es_lru_nr == 0 || ei == locked_ei) + if (ei->i_es_lru_nr == 0 || ei == locked_ei || + !write_trylock(&ei->i_es_lock)) continue; - write_lock(&ei->i_es_lock); shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan); if (ei->i_es_lru_nr == 0) list_del_init(&ei->i_es_lru); diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 0ee59a6644e2..64bb32f17903 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -851,6 +851,13 @@ got: goto out; } + BUFFER_TRACE(group_desc_bh, "get_write_access"); + err = ext4_journal_get_write_access(handle, group_desc_bh); + if (err) { + ext4_std_error(sb, err); + goto out; + } + /* We may have to initialize the block bitmap if it isn't already */ if (ext4_has_group_desc_csum(sb) && gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { @@ -887,13 +894,6 @@ got: } } - BUFFER_TRACE(group_desc_bh, "get_write_access"); - err = ext4_journal_get_write_access(handle, group_desc_bh); - if (err) { - ext4_std_error(sb, err); - goto out; - } - /* Update the relevant bg descriptor fields */ if (ext4_has_group_desc_csum(sb)) { int free; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 08ddfdac955c..502f0fd71470 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -751,8 +751,8 @@ void ext4_mb_generate_buddy(struct super_block *sb, if (free != grp->bb_free) { ext4_grp_locked_error(sb, group, 0, 0, - "%u clusters in bitmap, %u in gd; " - "block bitmap corrupt.", + "block bitmap and bg descriptor " + "inconsistent: %u vs %u free clusters", free, grp->bb_free); /* * If we intend to continue, we consider group descriptor diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 710fed2377d4..25b327e87318 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1519,8 +1519,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, arg = JBD2_DEFAULT_MAX_COMMIT_AGE; sbi->s_commit_interval = HZ * arg; } else if (token == Opt_max_batch_time) { - if (arg == 0) - arg = EXT4_DEF_MAX_BATCH_TIME; sbi->s_max_batch_time = arg; } else if (token == Opt_min_batch_time) { sbi->s_min_batch_time = arg; @@ -2793,10 +2791,11 @@ 
 	es = sbi->s_es;
 
 	if (es->s_error_count)
-		ext4_msg(sb, KERN_NOTICE, "error count: %u",
+		/* fsck newer than v1.41.13 is needed to clean this condition. */
+		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
 			 le32_to_cpu(es->s_error_count));
 	if (es->s_first_error_time) {
-		printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d",
+		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
 		       sb->s_id, le32_to_cpu(es->s_first_error_time),
 		       (int) sizeof(es->s_first_error_func),
 		       es->s_first_error_func,
@@ -2810,7 +2809,7 @@ static void print_daily_error_info(unsigned long arg)
 		printk("\n");
 	}
 	if (es->s_last_error_time) {
-		printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d",
+		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
 		       sb->s_id, le32_to_cpu(es->s_last_error_time),
 		       (int) sizeof(es->s_last_error_func),
 		       es->s_last_error_func,
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 60bb365f54a5..f8a5d6a166fb 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1590,9 +1590,12 @@ int jbd2_journal_stop(handle_t *handle)
 	 * to perform a synchronous write. We do this to detect the
 	 * case where a single process is doing a stream of sync
 	 * writes. No point in waiting for joiners in that case.
+	 *
+	 * Setting max_batch_time to 0 disables this completely.
 	 */
 	pid = current->pid;
-	if (handle->h_sync && journal->j_last_sync_writer != pid) {
+	if (handle->h_sync && journal->j_last_sync_writer != pid &&
+	    journal->j_max_batch_time) {
 		u64 commit_time, trans_time;
 
 		journal->j_last_sync_writer = pid;
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index d69cf637a15a..49a4d6f59108 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
 	__ring_buffer_alloc((size), (flags), &__key);	\
 })
 
-void ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu);
 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 			  struct file *filp, poll_table *poll_table);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index e6b1b66afe52..6b27e5c0cd86 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1236,7 +1236,13 @@ done:
 
 int current_cpuset_is_being_rebound(void)
 {
-	return task_cs(current) == cpuset_being_rebound;
+	int ret;
+
+	rcu_read_lock();
+	ret = task_cs(current) == cpuset_being_rebound;
+	rcu_read_unlock();
+
+	return ret;
 }
 
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index fc4da2d97f9b..04202d9aa514 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -543,7 +543,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	DEFINE_WAIT(wait);
@@ -557,6 +557,8 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
 	if (cpu == RING_BUFFER_ALL_CPUS)
 		work = &buffer->irq_work;
 	else {
+		if (!cpumask_test_cpu(cpu, buffer->cpumask))
+			return -ENODEV;
 		cpu_buffer = buffer->buffers[cpu];
 		work = &cpu_buffer->irq_work;
 	}
@@ -591,6 +593,7 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
 	schedule();
 
 	finish_wait(&work->waiters, &wait);
+	return 0;
 }
 
 /**
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index fd21e601a891..922657f30723 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1091,13 +1091,13 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-static void default_wait_pipe(struct trace_iterator *iter)
+static int default_wait_pipe(struct trace_iterator *iter)
 {
 	/* Iterators are static, they should be filled or empty */
 	if (trace_buffer_iter(iter, iter->cpu_file))
-		return;
+		return 0;
 
-	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
 }
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -4160,17 +4160,19 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
  *
  * Anyway, this is really very primitive wakeup.
  */
-void poll_wait_pipe(struct trace_iterator *iter)
+int poll_wait_pipe(struct trace_iterator *iter)
 {
 	set_current_state(TASK_INTERRUPTIBLE);
 	/* sleep for 100 msecs, and try again. */
 	schedule_timeout(HZ / 10);
+	return 0;
 }
 
 /* Must be called with trace_types_lock mutex held. */
 static int tracing_wait_pipe(struct file *filp)
 {
 	struct trace_iterator *iter = filp->private_data;
+	int ret;
 
 	while (trace_empty(iter)) {
 
@@ -4180,10 +4182,13 @@ static int tracing_wait_pipe(struct file *filp)
 
 		mutex_unlock(&iter->mutex);
 
-		iter->trace->wait_pipe(iter);
+		ret = iter->trace->wait_pipe(iter);
 
 		mutex_lock(&iter->mutex);
 
+		if (ret)
+			return ret;
+
 		if (signal_pending(current))
 			return -EINTR;
 
@@ -5111,8 +5116,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 				goto out_unlock;
 			}
 			mutex_unlock(&trace_types_lock);
-			iter->trace->wait_pipe(iter);
+			ret = iter->trace->wait_pipe(iter);
 			mutex_lock(&trace_types_lock);
+			if (ret) {
+				size = ret;
+				goto out_unlock;
+			}
 			if (signal_pending(current)) {
 				size = -EINTR;
 				goto out_unlock;
@@ -5324,8 +5333,10 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			goto out;
 		}
 		mutex_unlock(&trace_types_lock);
-		iter->trace->wait_pipe(iter);
+		ret = iter->trace->wait_pipe(iter);
 		mutex_lock(&trace_types_lock);
+		if (ret)
+			goto out;
 		if (signal_pending(current)) {
 			ret = -EINTR;
 			goto out;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 02b592f2d4b7..c8bd809cbd1c 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -336,7 +336,7 @@ struct tracer {
 	void			(*stop)(struct trace_array *tr);
 	void			(*open)(struct trace_iterator *iter);
 	void			(*pipe_open)(struct trace_iterator *iter);
-	void			(*wait_pipe)(struct trace_iterator *iter);
+	int			(*wait_pipe)(struct trace_iterator *iter);
 	void			(*close)(struct trace_iterator *iter);
 	void			(*pipe_close)(struct trace_iterator *iter);
 	ssize_t			(*read)(struct trace_iterator *iter,
@@ -552,7 +552,7 @@ void trace_init_global_iter(struct trace_iterator *iter);
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 
-void poll_wait_pipe(struct trace_iterator *iter);
+int poll_wait_pipe(struct trace_iterator *iter);
 
 void tracing_sched_switch_trace(struct trace_array *tr,
 				struct task_struct *prev,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b6a394108e3b..b4defdecec8a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3415,6 +3415,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
 		}
 	}
 
+	dev_set_uevent_suppress(&wq_dev->dev, false);
 	kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
 	return 0;
 }
@@ -5026,7 +5027,7 @@ static void __init wq_numa_init(void)
 	BUG_ON(!tbl);
 
 	for_each_node(node)
-		BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+		BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
 				node_online(node) ? node : NUMA_NO_NODE));
 
 	for_each_possible_cpu(cpu) {
diff --git a/mm/Kconfig b/mm/Kconfig
index 9b63c1584a42..0862816bb455 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -580,3 +580,18 @@ config PGTABLE_MAPPING
 
 	  You can check speed with zsmalloc benchmark:
 	  https://github.com/spartacus06/zsmapbench
+
+config MAX_STACK_SIZE_MB
+	int "Maximum user stack size for 32-bit processes (MB)"
+	default 80
+	range 8 256 if METAG
+	range 8 2048
+	depends on STACK_GROWSUP && (!64BIT || COMPAT)
+	help
+	  This is the maximum stack size in Megabytes in the VM layout of 32-bit
+	  user processes when the stack grows upwards (currently only on parisc
+	  and metag arch). The stack will be located at the highest memory
+	  address minus the given value, unless the RLIMIT_STACK hard limit is
+	  changed to a smaller value in which case that is used.
+
+	  A sane initial value is 80 MB.
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9c6288aea4f9..15a8ea031526 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2170,7 +2170,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
 	} else
 		*new = *old;
 
-	rcu_read_lock();
 	if (current_cpuset_is_being_rebound()) {
 		nodemask_t mems = cpuset_mems_allowed(current);
 		if (new->flags & MPOL_F_REBINDING)
@@ -2178,7 +2177,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
 		else
 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
 	}
-	rcu_read_unlock();
 	atomic_set(&new->refcnt, 1);
 	return new;
 }
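
A note on the ext4 handle_mount_opt() and jbd2_journal_stop() hunks above: together they make "mount -o max_batch_time=0" meaningful. The option parser no longer rewrites an explicit 0 into the built-in default, and the commit path skips the sync-write batching wait whenever j_max_batch_time is 0. The following is a condensed stand-alone sketch of that before/after behaviour, not kernel code; the 15000 us default and the should_wait_for_joiners() helper are illustrative simplifications (the real jbd2 check also compares the last sync writer's pid).

/* build: cc -o batchdemo batchdemo.c */
#include <stdio.h>

#define DEF_MAX_BATCH_TIME	15000	/* ext4's default, in microseconds */

/* Old parser behaviour: an explicit 0 was silently replaced. */
static int parse_max_batch_time_old(int arg)
{
	if (arg == 0)
		arg = DEF_MAX_BATCH_TIME;
	return arg;
}

/* New parser behaviour: 0 is stored as given ... */
static int parse_max_batch_time_new(int arg)
{
	return arg;
}

/* ... and a 0 now means "never wait for other sync writers to join". */
static int should_wait_for_joiners(int handle_is_sync, int max_batch_time)
{
	return handle_is_sync && max_batch_time != 0;
}

int main(void)
{
	printf("old parse(0): %d\n", parse_max_batch_time_old(0));	/* 15000 */
	printf("new parse(0): %d\n", parse_max_batch_time_new(0));	/* 0 */
	printf("wait with 0?  %d\n", should_wait_for_joiners(1, 0));	/* 0 */
	return 0;
}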
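
A note on the ring_buffer_wait()/wait_pipe() hunks above: the callbacks change from void to int so that a wait that cannot be satisfied (for example, a CPU missing from the buffer's cpumask, which now yields -ENODEV) is reported to trace_pipe readers instead of being silently swallowed. The sketch below is an illustrative userspace program, not kernel code; wait_for_data() and consumer_read() are hypothetical names that only mirror the caller pattern the patch introduces: check the wait's return value, bail out on error, otherwise keep waiting for data.

/* build: cc -o waitdemo waitdemo.c */
#include <errno.h>
#include <stdio.h>

#define EXAMPLE_NR_CPUS	4	/* pretend CPUs 0..3 are valid */

/* Stand-in for ring_buffer_wait(): returned void before, int after. */
static int wait_for_data(int cpu, int *ready)
{
	if (cpu < 0 || cpu >= EXAMPLE_NR_CPUS)
		return -ENODEV;	/* mirrors the cpumask_test_cpu() check */
	*ready = 1;		/* a real implementation would sleep here */
	return 0;
}

/* Stand-in for tracing_wait_pipe(): surface the error to the reader. */
static int consumer_read(int cpu)
{
	int ready = 0;

	while (!ready) {
		int ret = wait_for_data(cpu, &ready);

		if (ret)
			return ret;	/* previously this error was lost */
	}
	return 0;		/* data available, proceed to read */
}

int main(void)
{
	printf("cpu 1: %d\n", consumer_read(1));	/* 0 */
	printf("cpu 9: %d\n", consumer_read(9));	/* -19 (-ENODEV) */
	return 0;
}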
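
The MAX_STACK_SIZE_MB help text above describes a simple computation: the stack base ends up at the top of the user address space minus the smaller of the configured maximum and the RLIMIT_STACK hard limit. Below is a minimal sketch of that arithmetic only; EXAMPLE_STACK_TOP and the rlimit values are made-up example numbers, not taken from any particular architecture.

/* build: cc -o stackdemo stackdemo.c */
#include <stdio.h>

#define EXAMPLE_STACK_TOP	0xf8000000UL	/* hypothetical top of user VM */
#define MAX_STACK_SIZE_MB	80UL		/* the Kconfig default above */

static unsigned long stack_base(unsigned long rlimit_stack_hard)
{
	unsigned long size_max = MAX_STACK_SIZE_MB * 1024 * 1024;
	unsigned long size = rlimit_stack_hard;

	/* the smaller of the configured cap and the hard rlimit wins */
	if (size > size_max)
		size = size_max;

	return EXAMPLE_STACK_TOP - size;
}

int main(void)
{
	/* large hard limit: the 80 MB cap applies, base = top - 80 MB */
	printf("base: %#lx\n", stack_base(1024UL * 1024 * 1024));
	/* 16 MB hard limit: the rlimit applies instead */
	printf("base: %#lx\n", stack_base(16UL * 1024 * 1024));
	return 0;
}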