diff --git a/Documentation/cpu-freq/pcc-cpufreq.txt b/Documentation/cpu-freq/pcc-cpufreq.txt
index 0a94224ad296..9e3c3b33514c 100644
--- a/Documentation/cpu-freq/pcc-cpufreq.txt
+++ b/Documentation/cpu-freq/pcc-cpufreq.txt
@@ -159,8 +159,8 @@ to be strictly associated with a P-state.
 
 2.2 cpuinfo_transition_latency:
 -------------------------------
-The cpuinfo_transition_latency field is CPUFREQ_ETERNAL. The PCC specification
-does not include a field to expose this value currently.
+The cpuinfo_transition_latency field is 0. The PCC specification does
+not include a field to expose this value currently.
 
 2.3 cpuinfo_cur_freq:
 ---------------------
diff --git a/Makefile b/Makefile
index 66da9a38b13b..84335c0b2eda 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 7
-SUBLEVEL = 0
+SUBLEVEL = 1
 EXTRAVERSION =
 NAME = Psychotic Stoned Sheep
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 087acb569b63..5f221acd21ae 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -279,8 +279,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
 	mm_segment_t fs;
 	long ret, err, i;
 
-	if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
+	if (maxevents <= 0 ||
+	    maxevents > (INT_MAX/sizeof(*kbuf)) ||
+	    maxevents > (INT_MAX/sizeof(*events)))
 		return -EINVAL;
+	if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
+		return -EFAULT;
 	kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
 	if (!kbuf)
 		return -ENOMEM;
@@ -317,6 +321,8 @@ asmlinkage long sys_oabi_semtimedop(int semid,
 
 	if (nsops < 1 || nsops > SEMOPM)
 		return -EINVAL;
+	if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
+		return -EFAULT;
 	sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
 	if (!sops)
 		return -ENOMEM;
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 9c0b387d6427..51d3988933f8 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -348,7 +348,7 @@ EXPORT(sysn32_call_table)
 	PTR	sys_ni_syscall			/* available, was setaltroot */
 	PTR	sys_add_key
 	PTR	sys_request_key
-	PTR	sys_keyctl			/* 6245 */
+	PTR	compat_sys_keyctl		/* 6245 */
 	PTR	sys_set_thread_area
 	PTR	sys_inotify_init
 	PTR	sys_inotify_add_watch
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f4f28b1580de..6efa7136748f 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -504,7 +504,7 @@ EXPORT(sys32_call_table)
 	PTR	sys_ni_syscall			/* available, was setaltroot */
 	PTR	sys_add_key			/* 4280 */
 	PTR	sys_request_key
-	PTR	sys_keyctl
+	PTR	compat_sys_keyctl
 	PTR	sys_set_thread_area
 	PTR	sys_inotify_init
 	PTR	sys_inotify_add_watch		/* 4285 */
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 4cddd17153fb..f848572169ea 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -294,7 +294,7 @@
 # 285 sys_setaltroot
 286	i386	add_key			sys_add_key
 287	i386	request_key		sys_request_key
-288	i386	keyctl			sys_keyctl
+288	i386	keyctl			sys_keyctl			compat_sys_keyctl
 289	i386	ioprio_set		sys_ioprio_set
 290	i386	ioprio_get		sys_ioprio_get
 291	i386	inotify_init		sys_inotify_init
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 9d3a96c4da78..01c2d14ec05f 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -133,13 +133,11 @@ static inline unsigned int x86_cpuid_family(void)
 #ifdef CONFIG_MICROCODE
 extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
-extern int __init save_microcode_in_initrd(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
 #else
 static inline void __init load_ucode_bsp(void)			{ }
 static inline void load_ucode_ap(void)				{ }
-static inline int __init save_microcode_in_initrd(void)	{ return 0; }
 static inline void reload_early_microcode(void)		{ }
 static inline bool
 get_builtin_firmware(struct cpio_data *cd, const char *name)	{ return false; }
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index ac360bfbbdb6..12823b6ebd6d 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -175,7 +175,7 @@ void load_ucode_ap(void)
 	}
 }
 
-int __init save_microcode_in_initrd(void)
+static int __init save_microcode_in_initrd(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 
@@ -691,4 +691,5 @@ int __init microcode_init(void)
 	return error;
 }
 
+fs_initcall(save_microcode_in_initrd);
 late_initcall(microcode_init);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 372aad2b3291..dffd162db0a4 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -696,13 +696,6 @@ void free_initmem(void)
 void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
 	/*
-	 * Remember, initrd memory may contain microcode or other useful things.
-	 * Before we lose initrd mem, we need to find a place to hold them
-	 * now that normal virtual memory is enabled.
-	 */
-	save_microcode_in_initrd();
-
-	/*
 	 * end could be not aligned, and We can not align that,
 	 * decompresser could be confused by aligned initrd_end
 	 * We already reserve the end partial page before in
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 3177c2bc26f6..8eee0e9c93f0 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -24,7 +24,6 @@
 #include <asm/frame.h>
 
 ENTRY(swsusp_arch_suspend)
-	FRAME_BEGIN
 	movq	$saved_context, %rax
 	movq	%rsp, pt_regs_sp(%rax)
 	movq	%rbp, pt_regs_bp(%rax)
@@ -48,6 +47,7 @@ ENTRY(swsusp_arch_suspend)
 	movq	%cr3, %rax
 	movq	%rax, restore_cr3(%rip)
 
+	FRAME_BEGIN
 	call	swsusp_save
 	FRAME_END
 	ret
@@ -104,7 +104,6 @@ ENTRY(core_restore_code)
 
 	/* code below belongs to the image kernel */
 	.align PAGE_SIZE
 ENTRY(restore_registers)
-	FRAME_BEGIN
 	/* go back to the original page tables */
 	movq	%r9, %cr3
@@ -145,6 +144,5 @@ ENTRY(restore_registers)
 	/* tell the hibernation core that we've just restored the memory */
 	movq	%rax, in_suspend(%rip)
 
-	FRAME_END
 	ret
 ENDPROC(restore_registers)
diff --git a/block/genhd.c b/block/genhd.c
index 9f42526b4d62..3eebd256b765 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -856,6 +856,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
 	if (iter) {
 		class_dev_iter_exit(iter);
 		kfree(iter);
+		seqf->private = NULL;
 	}
 }
 
diff --git a/crypto/gcm.c b/crypto/gcm.c
index bec329b3de8d..d9ea5f9c0574 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -639,7 +639,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
 
 	ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
 				    CRYPTO_ALG_TYPE_HASH,
-				    CRYPTO_ALG_TYPE_AHASH_MASK);
+				    CRYPTO_ALG_TYPE_AHASH_MASK |
+				    crypto_requires_sync(algt->type,
+							 algt->mask));
 	if (IS_ERR(ghash_alg))
 		return PTR_ERR(ghash_alg);
 
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index ea5815c5e128..bc769c448d4a 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -72,7 +72,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
 
 void scatterwalk_done(struct scatter_walk *walk, int out, int more)
 {
-	if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
+	if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
+	    !(walk->offset & (PAGE_SIZE - 1)))
 		scatterwalk_pagedone(walk, out, more);
 }
 EXPORT_SYMBOL_GPL(scatterwalk_done);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0158d3bff7e5..87ab9f6b4112 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -723,15 +723,18 @@ retry:
 	}
 }
 
-static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
 {
 	const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
 
+	if (nbits < 0)
+		return -EINVAL;
+
 	/* Cap the value to avoid overflows */
 	nbits = min(nbits, nbits_max);
-	nbits = max(nbits, -nbits_max);
 
 	credit_entropy_bits(r, nbits);
+	return 0;
 }
 
 /*********************************************************************
@@ -1543,8 +1546,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 			return -EPERM;
 		if (get_user(ent_count, p))
 			return -EFAULT;
-		credit_entropy_bits_safe(&input_pool, ent_count);
-		return 0;
+		return credit_entropy_bits_safe(&input_pool, ent_count);
 	case RNDADDENTROPY:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
@@ -1558,8 +1560,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 				    size);
 		if (retval < 0)
 			return retval;
-		credit_entropy_bits_safe(&input_pool, ent_count);
-		return 0;
+		return credit_entropy_bits_safe(&input_pool, ent_count);
 	case RNDZAPENTCNT:
 	case RNDCLEARPOOL:
 		/*
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index a7ecb9a84c15..3f0ce2ae35ee 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -555,8 +555,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	policy->min = policy->cpuinfo.min_freq =
 		ioread32(&pcch_hdr->minimum_frequency) * 1000;
 
-	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-
 	pr_debug("init: policy->max is %d, policy->min is %d\n",
 		policy->max, policy->min);
 out:
diff --git a/drivers/infiniband/hw/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
index a925fb0db706..f846fd51b85b 100644
--- a/drivers/infiniband/hw/hfi1/Kconfig
+++ b/drivers/infiniband/hw/hfi1/Kconfig
@@ -3,7 +3,6 @@ config INFINIBAND_HFI1
 	depends on X86_64 && INFINIBAND_RDMAVT
 	select MMU_NOTIFIER
 	select CRC32
-	default m
 	---help---
 	This is a low-level driver for Intel OPA Gen1 adapter.
 config HFI1_DEBUG_SDMA_ORDER
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a2afa3be17a4..4d7981946f79 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1422,7 +1422,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		return -EINVAL;
 	}
 
-	if (slave_ops->ndo_set_mac_address == NULL) {
+	if (slave_dev->type == ARPHRD_INFINIBAND &&
+	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
+		netdev_warn(bond_dev, "Type (%d) supports only active-backup mode\n",
+			    slave_dev->type);
+		res = -EOPNOTSUPP;
+		goto err_undo_flags;
+	}
+
+	if (!slave_ops->ndo_set_mac_address ||
+	    slave_dev->type == ARPHRD_INFINIBAND) {
 		netdev_warn(bond_dev, "The slave device specified does not support setting the MAC address\n");
 		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
 		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index b122f6013b6c..03601dfc0642 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -809,13 +809,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 			 * in a bitmap and increasing the chain consumer only
 			 * for the first successive completed entries.
 			 */
-			bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
+			__set_bit(pos, p_spq->p_comp_bitmap);
 
 			while (test_bit(p_spq->comp_bitmap_idx,
 					p_spq->p_comp_bitmap)) {
-				bitmap_clear(p_spq->p_comp_bitmap,
-					     p_spq->comp_bitmap_idx,
-					     SPQ_RING_SIZE);
+				__clear_bit(p_spq->comp_bitmap_idx,
+					    p_spq->p_comp_bitmap);
 				p_spq->comp_bitmap_idx++;
 				qed_chain_return_produced(&p_spq->chain);
 			}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 8bcd78f94966..a70b6c460178 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -942,7 +942,6 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 	}
 
 	macsec_skb_cb(skb)->req = req;
-	macsec_skb_cb(skb)->rx_sa = rx_sa;
 	skb->dev = dev;
 	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
 
@@ -1169,6 +1168,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
 		}
 	}
 
+	macsec_skb_cb(skb)->rx_sa = rx_sa;
+
 	/* Disabled && !changed text => skip validation */
 	if (hdr->tci_an & MACSEC_TCI_C ||
 	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index ce362bd51de7..45b57c294d13 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -300,6 +300,8 @@ static int mvebu_uart_startup(struct uart_port *port)
 static void mvebu_uart_shutdown(struct uart_port *port)
 {
 	writel(0, port->membase + UART_CTRL);
+
+	free_irq(port->irq, port);
 }
 
 static void mvebu_uart_set_termios(struct uart_port *port,
diff --git a/fs/dcache.c b/fs/dcache.c
index d6847d7b123d..1ed81bb80500 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -622,7 +622,6 @@ static struct dentry *dentry_kill(struct dentry *dentry)
 
 failed:
 	spin_unlock(&dentry->d_lock);
-	cpu_relax();
 	return dentry; /* try again with same dentry */
 }
 
@@ -796,6 +795,8 @@ void dput(struct dentry *dentry)
 		return;
 
 repeat:
+	might_sleep();
+
 	rcu_read_lock();
 	if (likely(fast_dput(dentry))) {
 		rcu_read_unlock();
@@ -829,8 +830,10 @@ repeat:
 
 kill_it:
 	dentry = dentry_kill(dentry);
-	if (dentry)
+	if (dentry) {
+		cond_resched();
 		goto repeat;
+	}
 }
 EXPORT_SYMBOL(dput);
 
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 3020fd70c392..1ea505434a6e 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -208,6 +208,9 @@ static int ext4_init_block_bitmap(struct super_block *sb,
 	memset(bh->b_data, 0, sb->s_blocksize);
 
 	bit_max = ext4_num_base_meta_clusters(sb, block_group);
+	if ((bit_max >> 3) >= bh->b_size)
+		return -EFSCORRUPTED;
+
 	for (bit = 0; bit < bit_max; bit++)
 		ext4_set_bit(bit, bh->b_data);
 
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 2a2eef9c14e4..d7ccb7f51dfc 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -381,9 +381,13 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
 	ext4_fsblk_t block = ext4_ext_pblock(ext);
 	int len = ext4_ext_get_actual_len(ext);
 	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
-	ext4_lblk_t last = lblock + len - 1;
 
-	if (len == 0 || lblock > last)
+	/*
+	 * We allow neither:
+	 *  - zero length
+	 *  - overflow/wrap-around
+	 */
+	if (lblock + len <= lblock)
 		return 0;
 	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
 }
@@ -474,6 +478,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
 		error_msg = "invalid extent entries";
 		goto corrupted;
 	}
+	if (unlikely(depth > 32)) {
+		error_msg = "too large eh_depth";
+		goto corrupted;
+	}
 	/* Verify checksum on non-root extent tree nodes */
 	if (ext_depth(inode) != depth &&
 	    !ext4_extent_block_csum_verify(inode, eh)) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index f7140ca66e3b..b747ec09c1ac 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -205,9 +205,9 @@ void ext4_evict_inode(struct inode *inode)
 	 * Note that directories do not have this problem because they
 	 * don't use page cache.
 	 */
-	if (ext4_should_journal_data(inode) &&
-	    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
-	    inode->i_ino != EXT4_JOURNAL_INO) {
+	if (inode->i_ino != EXT4_JOURNAL_INO &&
+	    ext4_should_journal_data(inode) &&
+	    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
 		journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
 		tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
 
@@ -2748,13 +2748,36 @@ retry:
 				done = true;
 			}
 		}
-		ext4_journal_stop(handle);
+		/*
+		 * Caution: If the handle is synchronous,
+		 * ext4_journal_stop() can wait for transaction commit
+		 * to finish which may depend on writeback of pages to
+		 * complete or on page lock to be released. In that
+		 * case, we have to wait until after we have
+		 * submitted all the IO, released page locks we hold,
+		 * and dropped io_end reference (for extent conversion
+		 * to be able to complete) before stopping the handle.
+		 */
+		if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
+			ext4_journal_stop(handle);
+			handle = NULL;
+		}
+
 		/* Submit prepared bio */
 		ext4_io_submit(&mpd.io_submit);
 		/* Unlock pages we didn't use */
 		mpage_release_unused_pages(&mpd, give_up_on_write);
-		/* Drop our io_end reference we got from init */
-		ext4_put_io_end(mpd.io_submit.io_end);
+		/*
+		 * Drop our io_end reference we got from init. We have
+		 * to be careful and use deferred io_end finishing if
+		 * we are still holding the transaction as we can
+		 * release the last reference to io_end which may end
+		 * up doing unwritten extent conversion.
+		 */
+		if (handle) {
+			ext4_put_io_end_defer(mpd.io_submit.io_end);
+			ext4_journal_stop(handle);
+		} else
+			ext4_put_io_end(mpd.io_submit.io_end);
 
 		if (ret == -ENOSPC && sbi->s_journal) {
 			/*
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c1ab3ec30423..7f42eda52523 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2939,7 +2939,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
 			   "fs metadata", block, block+len);
 		/* File system mounted not to panic on error
-		 * Fix the bitmap and repeat the block allocation
+		 * Fix the bitmap and return EFSCORRUPTED
 		 * We leak some of the blocks here.
 		 */
 		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
@@ -2948,7 +2948,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
 		if (!err)
-			err = -EAGAIN;
+			err = -EFSCORRUPTED;
 		goto out_err;
 	}
 
@@ -4513,18 +4513,7 @@ repeat:
 	}
 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
-		if (*errp == -EAGAIN) {
-			/*
-			 * drop the reference that we took
-			 * in ext4_mb_use_best_found
-			 */
-			ext4_mb_release_context(ac);
-			ac->ac_b_ex.fe_group = 0;
-			ac->ac_b_ex.fe_start = 0;
-			ac->ac_b_ex.fe_len = 0;
-			ac->ac_status = AC_STATUS_CONTINUE;
-			goto repeat;
-		} else if (*errp) {
+		if (*errp) {
 			ext4_discard_allocated_blocks(ac);
 			goto errout;
 		} else {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 3822a5aedc61..639bd756a8d8 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2278,6 +2278,16 @@ static void ext4_orphan_cleanup(struct super_block *sb,
 	while (es->s_last_orphan) {
 		struct inode *inode;
 
+		/*
+		 * We may have encountered an error during cleanup; if
+		 * so, skip the rest.
+		 */
+		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
+			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
+			es->s_last_orphan = 0;
+			break;
+		}
+
 		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
 		if (IS_ERR(inode)) {
 			es->s_last_orphan = 0;
@@ -3416,6 +3426,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			goto failed_mount;
 	}
 
+	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
+		ext4_msg(sb, KERN_ERR,
+			 "Number of reserved GDT blocks insanely large: %d",
+			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
+		goto failed_mount;
+	}
+
 	if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
 		err = bdev_dax_supported(sb, blocksize);
 		if (err)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9154f8679024..6cac3dc33521 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -417,6 +417,15 @@ static int fuse_flush(struct file *file, fl_owner_t id)
 	fuse_sync_writes(inode);
 	inode_unlock(inode);
 
+	if (test_bit(AS_ENOSPC, &file->f_mapping->flags) &&
+	    test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags))
+		err = -ENOSPC;
+	if (test_bit(AS_EIO, &file->f_mapping->flags) &&
+	    test_and_clear_bit(AS_EIO, &file->f_mapping->flags))
+		err = -EIO;
+	if (err)
+		return err;
+
 	req = fuse_get_req_nofail_nopages(fc, file);
 	memset(&inarg, 0, sizeof(inarg));
 	inarg.fh = ff->fh;
@@ -462,6 +471,21 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
 		goto out;
 
 	fuse_sync_writes(inode);
+
+	/*
+	 * Due to implementation of fuse writeback
+	 * filemap_write_and_wait_range() does not catch errors.
+	 * We have to do this directly after fuse_sync_writes()
+	 */
+	if (test_bit(AS_ENOSPC, &file->f_mapping->flags) &&
+	    test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags))
+		err = -ENOSPC;
+	if (test_bit(AS_EIO, &file->f_mapping->flags) &&
+	    test_and_clear_bit(AS_EIO, &file->f_mapping->flags))
+		err = -EIO;
+	if (err)
+		goto out;
+
 	err = sync_inode_metadata(inode, 1);
 	if (err)
 		goto out;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 9961d8432ce3..9b7cb37b4ba8 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -942,7 +942,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
 	arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
 		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
 		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
-		FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
+		FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
 		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
 		FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
 		FUSE_PARALLEL_DIROPS;
diff --git a/fs/inode.c b/fs/inode.c
index 4ccbc21b30ce..9ea421948742 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -345,7 +345,7 @@ EXPORT_SYMBOL(inc_nlink);
 void address_space_init_once(struct address_space *mapping)
 {
 	memset(mapping, 0, sizeof(*mapping));
-	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
 	spin_lock_init(&mapping->tree_lock);
 	init_rwsem(&mapping->i_mmap_rwsem);
 	INIT_LIST_HEAD(&mapping->private_list);
@@ -1740,8 +1740,8 @@ static int __remove_privs(struct dentry *dentry, int kill)
  */
 int file_remove_privs(struct file *file)
 {
-	struct dentry *dentry = file->f_path.dentry;
-	struct inode *inode = d_inode(dentry);
+	struct dentry *dentry = file_dentry(file);
+	struct inode *inode = file_inode(file);
 	int kill;
 	int error = 0;
 
@@ -1749,7 +1749,7 @@ int file_remove_privs(struct file *file)
 	if (IS_NOSEC(inode))
 		return 0;
 
-	kill = file_needs_remove_privs(file);
+	kill = dentry_needs_remove_privs(dentry);
 	if (kill < 0)
 		return kill;
 	if (kill)
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 116a333e9c77..0f56deb24ce6 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -590,6 +590,7 @@ static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
 		goto out;
 	}
 
+	same->dest_count = count;
 	ret = vfs_dedupe_file_range(file, same);
 	if (ret)
 		goto out;
diff --git a/ipc/msg.c b/ipc/msg.c
index 1471db9a7e61..c6521c205cb4 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -680,7 +680,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
 		rcu_read_lock();
 		ipc_lock_object(&msq->q_perm);
 
-		ipc_rcu_putref(msq, ipc_rcu_free);
+		ipc_rcu_putref(msq, msg_rcu_free);
 		/* raced with RMID? */
 		if (!ipc_valid_object(&msq->q_perm)) {
 			err = -EIDRM;
diff --git a/ipc/sem.c b/ipc/sem.c
index b3757ea0694b..5d2f875e8e2e 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -449,7 +449,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
 static inline void sem_lock_and_putref(struct sem_array *sma)
 {
 	sem_lock(sma, NULL, -1);
-	ipc_rcu_putref(sma, ipc_rcu_free);
+	ipc_rcu_putref(sma, sem_rcu_free);
 }
 
 static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -1392,7 +1392,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 			rcu_read_unlock();
 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
 			if (sem_io == NULL) {
-				ipc_rcu_putref(sma, ipc_rcu_free);
+				ipc_rcu_putref(sma, sem_rcu_free);
 				return -ENOMEM;
 			}
 
@@ -1426,20 +1426,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		if (nsems > SEMMSL_FAST) {
 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
 			if (sem_io == NULL) {
-				ipc_rcu_putref(sma, ipc_rcu_free);
+				ipc_rcu_putref(sma, sem_rcu_free);
 				return -ENOMEM;
 			}
 		}
 
 		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
-			ipc_rcu_putref(sma, ipc_rcu_free);
+			ipc_rcu_putref(sma, sem_rcu_free);
 			err = -EFAULT;
 			goto out_free;
 		}
 
 		for (i = 0; i < nsems; i++) {
 			if (sem_io[i] > SEMVMX) {
-				ipc_rcu_putref(sma, ipc_rcu_free);
+				ipc_rcu_putref(sma, sem_rcu_free);
 				err = -ERANGE;
 				goto out_free;
 			}
@@ -1731,7 +1731,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	/* step 2: allocate new undo structure */
 	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
 	if (!new) {
-		ipc_rcu_putref(sma, ipc_rcu_free);
+		ipc_rcu_putref(sma, sem_rcu_free);
 		return ERR_PTR(-ENOMEM);
 	}
 
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8b7d8459bb9d..bc7852f95443 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -274,10 +274,11 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 
 		/*
 		 * Even if the caller has preloaded, try to allocate from the
-		 * cache first for the new node to get accounted.
+		 * cache first for the new node to get accounted to the memory
+		 * cgroup.
 		 */
 		ret = kmem_cache_alloc(radix_tree_node_cachep,
-				       gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
+				       gfp_mask | __GFP_NOWARN);
 		if (ret)
 			goto out;
 
@@ -300,8 +301,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		kmemleak_update_trace(ret);
 		goto out;
 	}
-	ret = kmem_cache_alloc(radix_tree_node_cachep,
-			       gfp_mask | __GFP_ACCOUNT);
+	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 out:
 	BUG_ON(radix_tree_is_internal_node(ret));
 	return ret;
@@ -348,6 +348,12 @@ static int __radix_tree_preload(gfp_t gfp_mask)
 	struct radix_tree_node *node;
 	int ret = -ENOMEM;
 
+	/*
+	 * Nodes preloaded by one cgroup can be used by another cgroup, so
+	 * they should never be accounted to any particular memory cgroup.
+	 */
+	gfp_mask &= ~__GFP_ACCOUNT;
+
 	preempt_disable();
 	rtp = this_cpu_ptr(&radix_tree_preloads);
 	while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5339c89dff63..ca847d96a980 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4083,14 +4083,32 @@ static struct cftype mem_cgroup_legacy_files[] = {
 
 static DEFINE_IDR(mem_cgroup_idr);
 
-static void mem_cgroup_id_get(struct mem_cgroup *memcg)
+static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
 {
-	atomic_inc(&memcg->id.ref);
+	atomic_add(n, &memcg->id.ref);
 }
 
-static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
 {
-	if (atomic_dec_and_test(&memcg->id.ref)) {
+	while (!atomic_inc_not_zero(&memcg->id.ref)) {
+		/*
+		 * The root cgroup cannot be destroyed, so its refcount must
+		 * always be >= 1.
+		 */
+		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
+			VM_BUG_ON(1);
+			break;
+		}
+		memcg = parent_mem_cgroup(memcg);
+		if (!memcg)
+			memcg = root_mem_cgroup;
+	}
+	return memcg;
+}
+
+static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
+{
+	if (atomic_sub_and_test(n, &memcg->id.ref)) {
 		idr_remove(&mem_cgroup_idr, memcg->id.id);
 		memcg->id.id = 0;
 
@@ -4099,6 +4117,16 @@ static void mem_cgroup_id_put(struct mem_cgroup *memcg)
 	}
 }
 
+static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+	mem_cgroup_id_get_many(memcg, 1);
+}
+
+static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+	mem_cgroup_id_put_many(memcg, 1);
+}
+
 /**
  * mem_cgroup_from_id - look up a memcg from a memcg id
  * @id: the memcg id to look up
@@ -4736,6 +4764,8 @@ static void __mem_cgroup_clear_mc(void)
 		if (!mem_cgroup_is_root(mc.from))
 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
 
+		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
+
 		/*
 		 * we charged both to->memory and to->memsw, so we
 		 * should uncharge to->memory.
@@ -4743,9 +4773,9 @@ static void __mem_cgroup_clear_mc(void)
 		if (!mem_cgroup_is_root(mc.to))
 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
 
-		css_put_many(&mc.from->css, mc.moved_swap);
+		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
+		css_put_many(&mc.to->css, mc.moved_swap);
 
-		/* we've already done css_get(mc.to) */
 		mc.moved_swap = 0;
 	}
 	memcg_oom_recover(from);
@@ -5805,7 +5835,7 @@ subsys_initcall(mem_cgroup_init);
  */
 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 {
-	struct mem_cgroup *memcg;
+	struct mem_cgroup *memcg, *swap_memcg;
 	unsigned short oldid;
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -5820,16 +5850,27 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	if (!memcg)
 		return;
 
-	mem_cgroup_id_get(memcg);
-	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+	/*
+	 * In case the memcg owning these pages has been offlined and doesn't
+	 * have an ID allocated to it anymore, charge the closest online
+	 * ancestor for the swap instead and transfer the memory+swap charge.
+	 */
+	swap_memcg = mem_cgroup_id_get_online(memcg);
+	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
 	VM_BUG_ON_PAGE(oldid, page);
-	mem_cgroup_swap_statistics(memcg, true);
+	mem_cgroup_swap_statistics(swap_memcg, true);
 
 	page->mem_cgroup = NULL;
 
 	if (!mem_cgroup_is_root(memcg))
 		page_counter_uncharge(&memcg->memory, 1);
 
+	if (memcg != swap_memcg) {
+		if (!mem_cgroup_is_root(swap_memcg))
+			page_counter_charge(&swap_memcg->memsw, 1);
+		page_counter_uncharge(&memcg->memsw, 1);
+	}
+
 	/*
 	 * Interrupts should be disabled here because the caller holds the
 	 * mapping->tree_lock lock which is taken with interrupts-off. It is
@@ -5868,11 +5909,14 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
 	if (!memcg)
 		return 0;
 
+	memcg = mem_cgroup_id_get_online(memcg);
+
 	if (!mem_cgroup_is_root(memcg) &&
-	    !page_counter_try_charge(&memcg->swap, 1, &counter))
+	    !page_counter_try_charge(&memcg->swap, 1, &counter)) {
+		mem_cgroup_id_put(memcg);
 		return -ENOMEM;
+	}
 
-	mem_cgroup_id_get(memcg);
 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
 	VM_BUG_ON_PAGE(oldid, page);
 	mem_cgroup_swap_statistics(memcg, true);
diff --git a/mm/mempool.c b/mm/mempool.c
index 8f65464da5de..47a659dedd44 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -306,7 +306,7 @@ EXPORT_SYMBOL(mempool_resize);
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
- * Note: neither __GFP_NOMEMALLOC nor __GFP_ZERO are supported.
+ * Note: using __GFP_ZERO is not supported.
 */
 void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 {
@@ -315,27 +315,16 @@ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 	wait_queue_t wait;
 	gfp_t gfp_temp;
 
-	/* If oom killed, memory reserves are essential to prevent livelock */
-	VM_WARN_ON_ONCE(gfp_mask & __GFP_NOMEMALLOC);
-
 	/* No element size to zero on allocation */
 	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
-
 	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
 
+	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
 	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
 	gfp_mask |= __GFP_NOWARN;	/* failures are OK */
 
 	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);
 
 repeat_alloc:
-	if (likely(pool->curr_nr)) {
-		/*
-		 * Don't allocate from emergency reserves if there are
-		 * elements available. This check is racy, but it will
-		 * be rechecked each loop.
-		 */
-		gfp_temp |= __GFP_NOMEMALLOC;
-	}
 
 	element = pool->alloc(gfp_temp, pool->pool_data);
 	if (likely(element != NULL))
@@ -359,12 +348,11 @@ repeat_alloc:
 	 * We use gfp mask w/o direct reclaim or IO for the first round. If
 	 * alloc failed with that and @pool was empty, retry immediately.
 	 */
-	if ((gfp_temp & ~__GFP_NOMEMALLOC) != gfp_mask) {
+	if (gfp_temp != gfp_mask) {
 		spin_unlock_irqrestore(&pool->lock, flags);
 		gfp_temp = gfp_mask;
 		goto repeat_alloc;
 	}
-	gfp_temp = gfp_mask;
 
 	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
 	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 43d2cd862bc2..28d5ec269e48 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -288,6 +288,14 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
 		case 0x01:	/* IEEE MAC (Pause) */
 			goto drop;
 
+		case 0x0E:	/* 802.1AB LLDP */
+			fwd_mask |= p->br->group_fwd_mask;
+			if (fwd_mask & (1u << dest[5]))
+				goto forward;
+			*pskb = skb;
+			__br_handle_local_finish(skb);
+			return RX_HANDLER_PASS;
+
 		default:
 			/* Allow selective forwarding for most other protocols */
 			fwd_mask |= p->br->group_fwd_mask;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e00e972c4e6a..700b72ca5912 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -236,7 +236,8 @@ void tcp_select_initial_window(int __space, __u32 mss,
 		/* Set window scaling on max possible window
 		 * See RFC1323 for an explanation of the limit to 14
 		 */
-		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
+		space = max_t(u32, space, sysctl_tcp_rmem[2]);
+		space = max_t(u32, space, sysctl_rmem_max);
 		space = min_t(u32, space, *window_clamp);
 		while (space > 65535 && (*rcv_wscale) < 14) {
 			space >>= 1;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4aed8fc23d32..e61f7cd65d08 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1581,9 +1581,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		    udp_lib_checksum_complete(skb))
 			goto csum_error;
 
-	if (sk_filter(sk, skb))
-		goto drop;
-	if (unlikely(skb->len < sizeof(struct udphdr)))
+	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
 		goto drop;
 
 	udp_csum_pull_header(skb);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 47f837a58e0a..047c75a798b1 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3562,6 +3562,10 @@ restart:
 		if (state != INET6_IFADDR_STATE_DEAD) {
 			__ipv6_ifa_notify(RTM_DELADDR, ifa);
 			inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
+		} else {
+			if (idev->cnf.forwarding)
+				addrconf_leave_anycast(ifa);
+			addrconf_leave_solict(ifa->idev, &ifa->addr);
 		}
 
 		write_lock_bh(&idev->lock);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index acc09705618b..42a2edf7c9ef 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -618,9 +618,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		    udp_lib_checksum_complete(skb))
 			goto csum_error;
 
-	if (sk_filter(sk, skb))
-		goto drop;
-	if (unlikely(skb->len < sizeof(struct udphdr)))
+	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
 		goto drop;
 
 	udp_csum_pull_header(skb);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 923abd6b3064..8d2f7c9b491d 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1024,8 +1024,11 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
 	}
 
 	/* Check if we have opened a local TSAP */
-	if (!self->tsap)
-		irda_open_tsap(self, LSAP_ANY, addr->sir_name);
+	if (!self->tsap) {
+		err = irda_open_tsap(self, LSAP_ANY, addr->sir_name);
+		if (err)
+			goto out;
+	}
 
 	/* Move to connecting socket, start sending Connect Requests */
 	sock->state = SS_CONNECTING;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 47cf4604d19c..f093322560e6 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -328,6 +328,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	 */
 
 	sk = rcvr->sk;
+	local_bh_disable();
 	bh_lock_sock(sk);
 
 	if (sock_owned_by_user(sk)) {
@@ -339,6 +340,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 		sctp_inq_push(inqueue, chunk);
 
 	bh_unlock_sock(sk);
+	local_bh_enable();
 
 	/* If the chunk was backloged again, don't drop refs */
 	if (backloged)
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 9d87bba0ff1d..b335ffcef0b9 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -89,12 +89,10 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
 	 * Eventually, we should clean up inqueue to not rely
 	 * on the BH related data structures.
 	 */
-	local_bh_disable();
 	list_add_tail(&chunk->list, &q->in_chunk_list);
 	if (chunk->asoc)
 		chunk->asoc->stats.ipackets++;
 	q->immediate.func(&q->immediate);
-	local_bh_enable();
 }
 
 /* Peek at the next chunk on the inqeue. */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 67154b848aa9..7f5689a93de9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4301,6 +4301,7 @@ int sctp_transport_walk_start(struct rhashtable_iter *iter)
 
 	err = rhashtable_walk_start(iter);
 	if (err && err != -EAGAIN) {
+		rhashtable_walk_stop(iter);
 		rhashtable_walk_exit(iter);
 		return err;
 	}
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index ad4fa49ad1db..9068369f8a1b 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -331,6 +331,7 @@ static int aa_fs_seq_hash_show(struct seq_file *seq, void *v)
 			seq_printf(seq, "%.2x", profile->hash[i]);
 		seq_puts(seq, "\n");
 	}
+	aa_put_profile(profile);
 
 	return 0;
 }
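
For illustration only, and not part of the patch above: a minimal userspace sketch of the drivers/char/random.c change. It assumes a Linux host with <linux/random.h> and a caller holding CAP_SYS_ADMIN; the negative count is an arbitrary test value. Before the fix, credit_entropy_bits_safe() clamped a negative RNDADDTOENTCNT argument with max() and applied it, debiting the entropy estimate; with the fix the ioctl fails with EINVAL.

/*
 * Sketch, not part of the patch: probe the RNDADDTOENTCNT behaviour
 * changed above. Needs CAP_SYS_ADMIN; error handling kept minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/random.h>

int main(void)
{
	int cnt = -64;	/* bogus negative entropy credit */
	int fd = open("/dev/random", O_RDWR);

	if (fd < 0) {
		perror("open(/dev/random)");
		return 1;
	}
	/* On a patched kernel this fails with EINVAL instead of succeeding. */
	if (ioctl(fd, RNDADDTOENTCNT, &cnt) < 0)
		perror("ioctl(RNDADDTOENTCNT)");
	else
		printf("entropy count adjusted by %d bits\n", cnt);
	close(fd);
	return 0;
}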