diff --git a/Makefile b/Makefile index d6db01a02252..7a246f1ce44e 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 47 +SUBLEVEL = 48 EXTRAVERSION = NAME = Petit Gorille @@ -369,11 +369,6 @@ HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS) HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) HOST_LOADLIBES := $(HOST_LFS_LIBS) -ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1) -HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \ - -Wno-missing-field-initializers -fno-delete-null-pointer-checks -endif - # Make variables (CC, etc...) AS = $(CROSS_COMPILE)as LD = $(CROSS_COMPILE)ld @@ -711,7 +706,6 @@ KBUILD_CFLAGS += $(stackp-flag) ifeq ($(cc-name),clang) KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) -KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable) KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) @@ -729,9 +723,9 @@ else # These warnings generated too much noise in a regular build. # Use make W=1 to enable them (see scripts/Makefile.extrawarn) KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) -KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) endif +KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) ifdef CONFIG_FRAME_POINTER KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls else diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 2f2d176396aa..e1ddb94a6522 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -721,6 +721,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) if (value & ~known_bits) return -EOPNOTSUPP; + /* Setting FRE without FR is not supported. 
*/ + if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE) + return -EOPNOTSUPP; + /* Avoid inadvertently triggering emulation */ if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu && !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64)) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 006105fb12fe..e058cd300713 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -809,7 +809,7 @@ long arch_ptrace(struct task_struct *child, long request, break; } #endif - tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); + tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); break; case PC: tmp = regs->cp0_epc; diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index 4a157d3249ac..89026d33a07b 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c @@ -108,7 +108,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, addr & 1); break; } - tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); + tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); break; case PC: tmp = regs->cp0_epc; diff --git a/arch/powerpc/include/asm/book3s/64/slice.h b/arch/powerpc/include/asm/book3s/64/slice.h new file mode 100644 index 000000000000..db0dedab65ee --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/slice.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H +#define _ASM_POWERPC_BOOK3S_64_SLICE_H + +#ifdef CONFIG_PPC_MM_SLICES + +#define SLICE_LOW_SHIFT 28 +#define SLICE_LOW_TOP (0x100000000ul) +#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) +#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) + +#define SLICE_HIGH_SHIFT 40 +#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT) +#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT) + +#else /* CONFIG_PPC_MM_SLICES */ + +#define get_slice_psize(mm, addr) ((mm)->context.user_psize) +#define slice_set_user_psize(mm, psize) \ +do { \ + (mm)->context.user_psize = (psize); \ + (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \ +} while (0) + +#endif /* CONFIG_PPC_MM_SLICES */ + +#endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */ diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h index 5bb3dbede41a..1325e5b5f680 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/mmu-8xx.h @@ -169,6 +169,12 @@ typedef struct { unsigned int id; unsigned int active; unsigned long vdso_base; +#ifdef CONFIG_PPC_MM_SLICES + u16 user_psize; /* page size index */ + u64 low_slices_psize; /* page size encodings */ + unsigned char high_slices_psize[0]; + unsigned long addr_limit; +#endif } mm_context_t; #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000) diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h new file mode 100644 index 000000000000..95d532e18092 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/slice.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H +#define _ASM_POWERPC_NOHASH_32_SLICE_H + +#ifdef CONFIG_PPC_MM_SLICES + +#define SLICE_LOW_SHIFT 28 +#define SLICE_LOW_TOP (0x100000000ull) +#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) +#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) + +#define SLICE_HIGH_SHIFT 0 +#define SLICE_NUM_HIGH 0ul +#define GET_HIGH_SLICE_INDEX(addr) (addr & 0) + +#endif /* CONFIG_PPC_MM_SLICES */ + +#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */ diff --git a/arch/powerpc/include/asm/nohash/64/slice.h 
b/arch/powerpc/include/asm/nohash/64/slice.h new file mode 100644 index 000000000000..ad0d6e3cc1c5 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/64/slice.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_64_SLICE_H +#define _ASM_POWERPC_NOHASH_64_SLICE_H + +#ifdef CONFIG_PPC_64K_PAGES +#define get_slice_psize(mm, addr) MMU_PAGE_64K +#else /* CONFIG_PPC_64K_PAGES */ +#define get_slice_psize(mm, addr) MMU_PAGE_4K +#endif /* !CONFIG_PPC_64K_PAGES */ +#define slice_set_user_psize(mm, psize) do { BUG(); } while (0) + +#endif /* _ASM_POWERPC_NOHASH_64_SLICE_H */ diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 8da5d4c1cab2..d5f1c41b7dba 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -344,5 +344,6 @@ typedef struct page *pgtable_t; #include #endif /* __ASSEMBLY__ */ +#include #endif /* _ASM_POWERPC_PAGE_H */ diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index c4d9654bd637..af04acdb873f 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -86,65 +86,6 @@ extern u64 ppc64_pft_size; #endif /* __ASSEMBLY__ */ -#ifdef CONFIG_PPC_MM_SLICES - -#define SLICE_LOW_SHIFT 28 -#define SLICE_HIGH_SHIFT 40 - -#define SLICE_LOW_TOP (0x100000000ul) -#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) -#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT) - -#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) -#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT) - -#ifndef __ASSEMBLY__ -struct mm_struct; - -extern unsigned long slice_get_unmapped_area(unsigned long addr, - unsigned long len, - unsigned long flags, - unsigned int psize, - int topdown); - -extern unsigned int get_slice_psize(struct mm_struct *mm, - unsigned long addr); - -extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); -extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start, - unsigned long len, unsigned int psize); - -#endif /* __ASSEMBLY__ */ -#else -#define slice_init() -#ifdef CONFIG_PPC_STD_MMU_64 -#define get_slice_psize(mm, addr) ((mm)->context.user_psize) -#define slice_set_user_psize(mm, psize) \ -do { \ - (mm)->context.user_psize = (psize); \ - (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \ -} while (0) -#else /* CONFIG_PPC_STD_MMU_64 */ -#ifdef CONFIG_PPC_64K_PAGES -#define get_slice_psize(mm, addr) MMU_PAGE_64K -#else /* CONFIG_PPC_64K_PAGES */ -#define get_slice_psize(mm, addr) MMU_PAGE_4K -#endif /* !CONFIG_PPC_64K_PAGES */ -#define slice_set_user_psize(mm, psize) do { BUG(); } while(0) -#endif /* !CONFIG_PPC_STD_MMU_64 */ - -#define slice_set_range_psize(mm, start, len, psize) \ - slice_set_user_psize((mm), (psize)) -#endif /* CONFIG_PPC_MM_SLICES */ - -#ifdef CONFIG_HUGETLB_PAGE - -#ifdef CONFIG_PPC_MM_SLICES -#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA -#endif - -#endif /* !CONFIG_HUGETLB_PAGE */ - #define VM_DATA_DEFAULT_FLAGS \ (is_32bit_task() ? 
\ VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64) diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h new file mode 100644 index 000000000000..172711fadb1c --- /dev/null +++ b/arch/powerpc/include/asm/slice.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_SLICE_H +#define _ASM_POWERPC_SLICE_H + +#ifdef CONFIG_PPC_BOOK3S_64 +#include +#elif defined(CONFIG_PPC64) +#include +#elif defined(CONFIG_PPC_MMU_NOHASH) +#include +#endif + +#ifdef CONFIG_PPC_MM_SLICES + +#ifdef CONFIG_HUGETLB_PAGE +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA +#endif +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN + +#ifndef __ASSEMBLY__ + +struct mm_struct; + +unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, + unsigned long flags, unsigned int psize, + int topdown); + +unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr); + +void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); +void slice_set_range_psize(struct mm_struct *mm, unsigned long start, + unsigned long len, unsigned int psize); +#endif /* __ASSEMBLY__ */ + +#else /* CONFIG_PPC_MM_SLICES */ + +#define slice_set_range_psize(mm, start, len, psize) \ + slice_set_user_psize((mm), (psize)) +#endif /* CONFIG_PPC_MM_SLICES */ + +#endif /* _ASM_POWERPC_SLICE_H */ diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index b4fcb54b9686..008447664643 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -915,6 +915,8 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_PPC_MM_SLICES #ifdef CONFIG_PPC64 init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64; +#elif defined(CONFIG_PPC_8xx) + init_mm.context.addr_limit = DEFAULT_MAP_WINDOW; #else #error "context.addr_limit not initialized." #endif diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c index f29212e40f40..0be77709446c 100644 --- a/arch/powerpc/mm/8xx_mmu.c +++ b/arch/powerpc/mm/8xx_mmu.c @@ -192,7 +192,7 @@ void set_context(unsigned long id, pgd_t *pgd) mtspr(SPRN_M_TW, __pa(pgd) - offset); /* Update context */ - mtspr(SPRN_M_CASID, id); + mtspr(SPRN_M_CASID, id - 1); /* sync */ mb(); } diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 1571a498a33f..4c9e5f9c7a44 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -552,9 +552,11 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct hstate *hstate = hstate_file(file); int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate)); +#ifdef CONFIG_PPC_RADIX_MMU if (radix_enabled()) return radix__hugetlb_get_unmapped_area(file, addr, len, pgoff, flags); +#endif return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1); } #endif diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index 4554d6527682..e2b28b3a512e 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -331,6 +331,20 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm) { pr_hard("initing context for mm @%p\n", mm); +#ifdef CONFIG_PPC_MM_SLICES + if (!mm->context.addr_limit) + mm->context.addr_limit = DEFAULT_MAP_WINDOW; + + /* + * We have MMU_NO_CONTEXT set to be ~0. Hence check + * explicitly against context.id == 0. 
This ensures that we properly + * initialize context slice details for newly allocated mm's (which will + * have id == 0) and don't alter context slice inherited via fork (which + * will have id != 0). + */ + if (mm->context.id == 0) + slice_set_user_psize(mm, mmu_virtual_psize); +#endif mm->context.id = MMU_NO_CONTEXT; mm->context.active = 0; return 0; @@ -428,8 +442,8 @@ void __init mmu_context_init(void) * -- BenH */ if (mmu_has_feature(MMU_FTR_TYPE_8xx)) { - first_context = 0; - last_context = 15; + first_context = 1; + last_context = 16; no_selective_tlbil = true; } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) { first_context = 1; diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index a4f93699194b..8baaa6c6f21c 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -73,10 +73,12 @@ static void slice_range_to_mask(unsigned long start, unsigned long len, unsigned long end = start + len - 1; ret->low_slices = 0; - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); + if (SLICE_NUM_HIGH) + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); if (start < SLICE_LOW_TOP) { - unsigned long mend = min(end, (SLICE_LOW_TOP - 1)); + unsigned long mend = min(end, + (unsigned long)(SLICE_LOW_TOP - 1)); ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1)) - (1u << GET_LOW_SLICE_INDEX(start)); @@ -113,11 +115,13 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) unsigned long start = slice << SLICE_HIGH_SHIFT; unsigned long end = start + (1ul << SLICE_HIGH_SHIFT); +#ifdef CONFIG_PPC64 /* Hack, so that each addresses is controlled by exactly one * of the high or low area bitmaps, the first high area starts * at 4GB, not 0 */ if (start == 0) start = SLICE_LOW_TOP; +#endif return !slice_area_is_free(mm, start, end - start); } @@ -127,7 +131,8 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret) unsigned long i; ret->low_slices = 0; - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); + if (SLICE_NUM_HIGH) + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); for (i = 0; i < SLICE_NUM_LOW; i++) if (!slice_low_has_vma(mm, i)) @@ -149,7 +154,8 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma u64 lpsizes; ret->low_slices = 0; - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); + if (SLICE_NUM_HIGH) + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); lpsizes = mm->context.low_slices_psize; for (i = 0; i < SLICE_NUM_LOW; i++) @@ -171,6 +177,10 @@ static int slice_check_fit(struct mm_struct *mm, DECLARE_BITMAP(result, SLICE_NUM_HIGH); unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit); + if (!SLICE_NUM_HIGH) + return (mask.low_slices & available.low_slices) == + mask.low_slices; + bitmap_and(result, mask.high_slices, available.high_slices, slice_count); @@ -180,6 +190,7 @@ static int slice_check_fit(struct mm_struct *mm, static void slice_flush_segments(void *parm) { +#ifdef CONFIG_PPC64 struct mm_struct *mm = parm; unsigned long flags; @@ -191,6 +202,7 @@ static void slice_flush_segments(void *parm) local_irq_save(flags); slb_flush_and_rebolt(); local_irq_restore(flags); +#endif } static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize) @@ -379,21 +391,21 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src) { - DECLARE_BITMAP(result, SLICE_NUM_HIGH); - dst->low_slices |= src->low_slices; - bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH); - 
bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH); + if (!SLICE_NUM_HIGH) + return; + bitmap_or(dst->high_slices, dst->high_slices, src->high_slices, + SLICE_NUM_HIGH); } static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src) { - DECLARE_BITMAP(result, SLICE_NUM_HIGH); - dst->low_slices &= ~src->low_slices; - bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH); - bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH); + if (!SLICE_NUM_HIGH) + return; + bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices, + SLICE_NUM_HIGH); } #ifdef CONFIG_PPC_64K_PAGES @@ -441,14 +453,17 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, * init different masks */ mask.low_slices = 0; - bitmap_zero(mask.high_slices, SLICE_NUM_HIGH); /* silence stupid warning */; potential_mask.low_slices = 0; - bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH); compat_mask.low_slices = 0; - bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH); + + if (SLICE_NUM_HIGH) { + bitmap_zero(mask.high_slices, SLICE_NUM_HIGH); + bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH); + bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH); + } /* Sanity checks */ BUG_ON(mm->task_size == 0); @@ -586,7 +601,9 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, convert: slice_andnot_mask(&mask, &good_mask); slice_andnot_mask(&mask, &compat_mask); - if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) { + if (mask.low_slices || + (SLICE_NUM_HIGH && + !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) { slice_convert(mm, mask, psize); if (psize > MMU_PAGE_BASE) on_each_cpu(slice_flush_segments, mm, 1); diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index a78f255111f2..3ce376b42330 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -325,6 +325,7 @@ config PPC_BOOK3E_MMU config PPC_MM_SLICES bool default y if PPC_STD_MMU_64 + default y if PPC_8xx && HUGETLB_PAGE default n config PPC_HAVE_PMU_SUPPORT diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 259c75d7a2a0..dbcb01006749 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -94,6 +94,11 @@ static struct smca_bank_name smca_names[] = { [SMCA_SMU] = { "smu", "System Management Unit" }, }; +static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init = +{ + [0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 } +}; + const char *smca_get_name(enum smca_bank_types t) { if (t >= N_SMCA_BANK_TYPES) @@ -429,52 +434,51 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c) wrmsr(MSR_CU_DEF_ERR, low, high); } -static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high, - unsigned int bank, unsigned int block) +static u32 smca_get_block_address(unsigned int cpu, unsigned int bank, + unsigned int block) { - u32 addr = 0, offset = 0; + u32 low, high; + u32 addr = 0; - if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) + if (smca_get_bank_type(bank) == SMCA_RESERVED) return addr; - /* Get address from already initialized block. 
*/ - if (per_cpu(threshold_banks, cpu)) { - struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank]; + if (!block) + return MSR_AMD64_SMCA_MCx_MISC(bank); - if (bankp && bankp->blocks) { - struct threshold_block *blockp = &bankp->blocks[block]; + /* Check our cache first: */ + if (smca_bank_addrs[bank][block] != -1) + return smca_bank_addrs[bank][block]; - if (blockp) - return blockp->address; - } - } + /* + * For SMCA enabled processors, BLKPTR field of the first MISC register + * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4). + */ + if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) + goto out; - if (mce_flags.smca) { - if (smca_get_bank_type(bank) == SMCA_RESERVED) - return addr; + if (!(low & MCI_CONFIG_MCAX)) + goto out; - if (!block) { - addr = MSR_AMD64_SMCA_MCx_MISC(bank); - } else { - /* - * For SMCA enabled processors, BLKPTR field of the - * first MISC register (MCx_MISC0) indicates presence of - * additional MISC register set (MISC1-4). - */ - u32 low, high; + if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && + (low & MASK_BLKPTR_LO)) + addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); - if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) - return addr; +out: + smca_bank_addrs[bank][block] = addr; + return addr; +} - if (!(low & MCI_CONFIG_MCAX)) - return addr; +static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high, + unsigned int bank, unsigned int block) +{ + u32 addr = 0, offset = 0; - if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && - (low & MASK_BLKPTR_LO)) - addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); - } + if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) return addr; - } + + if (mce_flags.smca) + return smca_get_block_address(cpu, bank, block); /* Fall back to method we used for older processors: */ switch (block) { diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 4a038dcf5361..bc1cb284111c 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -625,7 +625,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach); struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, enum dma_data_direction direction) { - struct sg_table *sg_table = ERR_PTR(-EINVAL); + struct sg_table *sg_table; might_sleep(); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index b33935fcf428..e6c6994e74ba 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -176,10 +176,10 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) cz_dpm_powerup_uvd(hwmgr); cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); + AMD_CG_STATE_UNGATE); cgs_set_powergating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); + AMD_PG_STATE_UNGATE); cz_dpm_update_uvd_dpm(hwmgr, false); } @@ -208,11 +208,11 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); + AMD_CG_STATE_UNGATE); cgs_set_powergating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); + AMD_PG_STATE_UNGATE); cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, true); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index 261b828ad590..2f3509be226f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -162,7 +162,7 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) AMD_CG_STATE_UNGATE); cgs_set_powergating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); + AMD_PG_STATE_UNGATE); smu7_update_uvd_dpm(hwmgr, false); } diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 08af8d6b844b..493d8f56d14e 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -1139,6 +1139,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]) static const u16 psr_setup_time_us[] = { PSR_SETUP_TIME(330), PSR_SETUP_TIME(275), + PSR_SETUP_TIME(220), PSR_SETUP_TIME(165), PSR_SETUP_TIME(110), PSR_SETUP_TIME(55), diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3b2c0538e48d..90359c7954c8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3378,24 +3378,12 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) return 0; } -static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms) -{ - return wait_for(intel_engine_is_idle(engine), timeout_ms); -} - static int wait_for_engines(struct drm_i915_private *i915) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) { - if (GEM_WARN_ON(wait_for_engine(engine, 50))) { - i915_gem_set_wedged(i915); - return -EIO; - } - - GEM_BUG_ON(intel_engine_get_seqno(engine) != - intel_engine_last_submit(engine)); + if (wait_for(intel_engines_are_idle(i915), 50)) { + DRM_ERROR("Failed to idle engines, declaring wedged!\n"); + i915_gem_set_wedged(i915); + return -EIO; } return 0; @@ -4575,7 +4563,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED); - if (ret) + if (ret && ret != -EIO) goto err_unlock; assert_kernel_context_is_current(dev_priv); @@ -4619,11 +4607,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) * machine in an unusable condition. 
*/ i915_gem_sanitize(dev_priv); - goto out_rpm_put; + + intel_runtime_pm_put(dev_priv); + return 0; err_unlock: mutex_unlock(&dev->struct_mutex); -out_rpm_put: intel_runtime_pm_put(dev_priv); return ret; } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 240308f1b6dd..dae4e22a2c3f 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -565,6 +565,36 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, return NOTIFY_OK; } +static int +intel_lvds_connector_register(struct drm_connector *connector) +{ + struct intel_lvds_connector *lvds = to_lvds_connector(connector); + int ret; + + ret = intel_connector_register(connector); + if (ret) + return ret; + + lvds->lid_notifier.notifier_call = intel_lid_notify; + if (acpi_lid_notifier_register(&lvds->lid_notifier)) { + DRM_DEBUG_KMS("lid notifier registration failed\n"); + lvds->lid_notifier.notifier_call = NULL; + } + + return 0; +} + +static void +intel_lvds_connector_unregister(struct drm_connector *connector) +{ + struct intel_lvds_connector *lvds = to_lvds_connector(connector); + + if (lvds->lid_notifier.notifier_call) + acpi_lid_notifier_unregister(&lvds->lid_notifier); + + intel_connector_unregister(connector); +} + /** * intel_lvds_destroy - unregister and free LVDS structures * @connector: connector to free @@ -577,9 +607,6 @@ static void intel_lvds_destroy(struct drm_connector *connector) struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector); - if (lvds_connector->lid_notifier.notifier_call) - acpi_lid_notifier_unregister(&lvds_connector->lid_notifier); - if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) kfree(lvds_connector->base.edid); @@ -600,8 +627,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, - .late_register = intel_connector_register, - .early_unregister = intel_connector_unregister, + .late_register = intel_lvds_connector_register, + .early_unregister = intel_lvds_connector_unregister, .destroy = intel_lvds_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, @@ -818,6 +845,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Radiant P845", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"), + DMI_MATCH(DMI_PRODUCT_NAME, "P845"), + }, + }, { } /* terminating entry */ }; @@ -1149,12 +1184,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK; - lvds_connector->lid_notifier.notifier_call = intel_lid_notify; - if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { - DRM_DEBUG_KMS("lid notifier registration failed\n"); - lvds_connector->lid_notifier.notifier_call = NULL; - } - return; failed: diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index dfb57eaa9f22..58ac786634dc 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -741,8 +741,8 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks) /* Reset the page to write-back before releasing */ set_memory_wb((unsigned long)win->block[i].bdesc, 1); #endif - 
dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc, - win->block[i].addr); + dma_free_coherent(msc_dev(msc)->parent->parent, size, + win->block[i].bdesc, win->block[i].addr); } kfree(win); @@ -777,7 +777,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win) /* Reset the page to write-back before releasing */ set_memory_wb((unsigned long)win->block[i].bdesc, 1); #endif - dma_free_coherent(msc_dev(win->msc), PAGE_SIZE, + dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, win->block[i].bdesc, win->block[i].addr); } diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index f129869e05a9..736862967e32 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "stm.h" #include @@ -682,7 +683,7 @@ static void stm_device_release(struct device *dev) { struct stm_device *stm = to_stm_device(dev); - kfree(stm); + vfree(stm); } int stm_register_device(struct device *parent, struct stm_data *stm_data, @@ -699,7 +700,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, return -EINVAL; nmasters = stm_data->sw_end - stm_data->sw_start + 1; - stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL); + stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *)); if (!stm) return -ENOMEM; @@ -752,7 +753,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, /* matches device_initialize() above */ put_device(&stm->dev); err_free: - kfree(stm); + vfree(stm); return err; } diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 1d13bf03c758..369a2c632e46 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -158,6 +158,7 @@ config AT91_SAMA5D2_ADC tristate "Atmel AT91 SAMA5D2 ADC" depends on ARCH_AT91 || COMPILE_TEST depends on HAS_IOMEM + select IIO_BUFFER select IIO_TRIGGERED_BUFFER help Say yes here to build support for Atmel SAMA5D2 ADC which is diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index 47c3d7f32900..07246a6037e3 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c @@ -348,55 +348,6 @@ static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39, static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0, 33, 0, 17, 16, 12, 10, 8, 6, 4}; -static ssize_t ad7793_read_frequency(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7793_state *st = iio_priv(indio_dev); - - return sprintf(buf, "%d\n", - st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]); -} - -static ssize_t ad7793_write_frequency(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7793_state *st = iio_priv(indio_dev); - long lval; - int i, ret; - - ret = kstrtol(buf, 10, &lval); - if (ret) - return ret; - - if (lval == 0) - return -EINVAL; - - for (i = 0; i < 16; i++) - if (lval == st->chip_info->sample_freq_avail[i]) - break; - if (i == 16) - return -EINVAL; - - ret = iio_device_claim_direct_mode(indio_dev); - if (ret) - return ret; - st->mode &= ~AD7793_MODE_RATE(-1); - st->mode |= AD7793_MODE_RATE(i); - ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode); - iio_device_release_direct_mode(indio_dev); - - return len; -} - -static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, - ad7793_read_frequency, - ad7793_write_frequency); - static 
IIO_CONST_ATTR_SAMP_FREQ_AVAIL( "470 242 123 62 50 39 33 19 17 16 12 10 8 6 4"); @@ -424,7 +375,6 @@ static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, ad7793_show_scale_available, NULL, 0); static struct attribute *ad7793_attributes[] = { - &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_const_attr_sampling_frequency_available.dev_attr.attr, &iio_dev_attr_in_m_in_scale_available.dev_attr.attr, NULL @@ -435,7 +385,6 @@ static const struct attribute_group ad7793_attribute_group = { }; static struct attribute *ad7797_attributes[] = { - &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr, NULL }; @@ -505,6 +454,10 @@ static int ad7793_read_raw(struct iio_dev *indio_dev, *val -= offset; } return IIO_VAL_INT; + case IIO_CHAN_INFO_SAMP_FREQ: + *val = st->chip_info + ->sample_freq_avail[AD7793_MODE_RATE(st->mode)]; + return IIO_VAL_INT; } return -EINVAL; } @@ -542,6 +495,26 @@ static int ad7793_write_raw(struct iio_dev *indio_dev, break; } break; + case IIO_CHAN_INFO_SAMP_FREQ: + if (!val) { + ret = -EINVAL; + break; + } + + for (i = 0; i < 16; i++) + if (val == st->chip_info->sample_freq_avail[i]) + break; + + if (i == 16) { + ret = -EINVAL; + break; + } + + st->mode &= ~AD7793_MODE_RATE(-1); + st->mode |= AD7793_MODE_RATE(i); + ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), + st->mode); + break; default: ret = -EINVAL; } diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c index ff03324dee13..0a7289571b68 100644 --- a/drivers/iio/buffer/industrialio-buffer-dma.c +++ b/drivers/iio/buffer/industrialio-buffer-dma.c @@ -587,7 +587,7 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum); * Should be used as the set_length callback for iio_buffer_access_ops * struct for DMA buffers. */ -int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length) +int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length) { /* Avoid an invalid state */ if (length < 2) diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c index 047fe757ab97..70c302a93d7f 100644 --- a/drivers/iio/buffer/kfifo_buf.c +++ b/drivers/iio/buffer/kfifo_buf.c @@ -22,11 +22,18 @@ struct iio_kfifo { #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer) static inline int __iio_allocate_kfifo(struct iio_kfifo *buf, - int bytes_per_datum, int length) + size_t bytes_per_datum, unsigned int length) { if ((length == 0) || (bytes_per_datum == 0)) return -EINVAL; + /* + * Make sure we don't overflow an unsigned int after kfifo rounds up to + * the next power of 2. 
+ */ + if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum) + return -EINVAL; + return __kfifo_alloc((struct __kfifo *)&buf->kf, length, bytes_per_datum, GFP_KERNEL); } @@ -67,7 +74,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd) return 0; } -static int iio_set_length_kfifo(struct iio_buffer *r, int length) +static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length) { /* Avoid an invalid state */ if (length < 2) diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 77515638c55c..896cfd9303b0 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -434,7 +434,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index, return -EINVAL; if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID) - return -EAGAIN; + return -EINVAL; memcpy(gid, &table->data_vec[index].gid, sizeof(*gid)); if (attr) { diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index 29f99529b187..cfcb32559925 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c @@ -130,7 +130,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client, bool max_baseline, u8 *value) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, max_baseline ? @@ -149,7 +149,7 @@ static int elan_smbus_get_version(struct i2c_client *client, bool iap, u8 *version) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, iap ? ETP_SMBUS_IAP_VERSION_CMD : @@ -170,7 +170,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, u8 *clickpad) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, ETP_SMBUS_SM_VERSION_CMD, val); @@ -188,7 +188,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, ETP_SMBUS_UNIQUEID_CMD, val); @@ -205,7 +205,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client, bool iap, u16 *csum) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, iap ? 
ETP_SMBUS_FW_CHECKSUM_CMD : @@ -226,7 +226,7 @@ static int elan_smbus_get_max(struct i2c_client *client, { int ret; int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val); if (ret != 3) { @@ -246,7 +246,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client, { int ret; int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val); if (ret != 3) { @@ -267,7 +267,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client, { int ret; int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val); if (ret != 3) { @@ -294,7 +294,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client, { int error; u16 constant; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val); if (error < 0) { @@ -345,7 +345,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client) int len; int error; enum tp_mode mode; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06}; u16 password; @@ -419,7 +419,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client, struct device *dev = &client->dev; int error; u16 result; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; /* * Due to the limitation of smbus protocol limiting diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index a246fc686bb7..6c4bbd38700e 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -172,6 +172,12 @@ static const char * const smbus_pnp_ids[] = { "LEN0048", /* X1 Carbon 3 */ "LEN0046", /* X250 */ "LEN004a", /* W541 */ + "LEN0071", /* T480 */ + "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ + "LEN0073", /* X1 Carbon G5 (Elantech) */ + "LEN0092", /* X1 Carbon 6 */ + "LEN0096", /* X280 */ + "LEN0097", /* X280 -> ALPS trackpoint */ "LEN200f", /* T450s */ NULL }; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index a2c1ca5c76d1..e1660b92b20c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -372,16 +372,15 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, /* * Determine IFS values - * - Use TXOP_BACKOFF for probe and management frames except beacons + * - Use TXOP_BACKOFF for management frames except beacons * - Use TXOP_SIFS for fragment bursts * - Use TXOP_HTTXOP for everything else * * Note: rt2800 devices won't use CTS protection (if used) * for frames not transmitted with TXOP_HTTXOP */ - if ((ieee80211_is_mgmt(hdr->frame_control) && - !ieee80211_is_beacon(hdr->frame_control)) || - (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) + if (ieee80211_is_mgmt(hdr->frame_control) && + !ieee80211_is_beacon(hdr->frame_control)) txdesc->u.ht.txop = TXOP_BACKOFF; else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) txdesc->u.ht.txop = TXOP_SIFS; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c index 9cff6bc4049c..cf551785eb08 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c @@ -299,9 +299,6 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, writeVal = 0x00000000; if 
(rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) writeVal = writeVal - 0x06060606; - else if (rtlpriv->dm.dynamic_txhighpower_lvl == - TXHIGHPWRLEVEL_BT2) - writeVal = writeVal; *(p_outwriteval + rf) = writeVal; } } diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 73b724143be0..c91662927de0 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -531,6 +531,8 @@ struct hv_pci_compl { s32 completion_status; }; +static void hv_pci_onchannelcallback(void *context); + /** * hv_pci_generic_compl() - Invoked for a completion packet * @context: Set up by the sender of the packet. @@ -675,6 +677,31 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, } } +static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev) +{ + u16 ret; + unsigned long flags; + void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + + PCI_VENDOR_ID; + + spin_lock_irqsave(&hpdev->hbus->config_lock, flags); + + /* Choose the function to be read. (See comment above) */ + writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); + /* Make sure the function was chosen before we start reading. */ + mb(); + /* Read from that function's config space. */ + ret = readw(addr); + /* + * mb() is not required here, because the spin_unlock_irqrestore() + * is a barrier. + */ + + spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); + + return ret; +} + /** * _hv_pcifront_write_config() - Internal PCI config write * @hpdev: The PCI driver's representation of the device @@ -1121,8 +1148,37 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) * Since this function is called with IRQ locks held, can't * do normal wait for completion; instead poll. */ - while (!try_wait_for_completion(&comp.comp_pkt.host_event)) + while (!try_wait_for_completion(&comp.comp_pkt.host_event)) { + /* 0xFFFF means an invalid PCI VENDOR ID. */ + if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) { + dev_err_once(&hbus->hdev->device, + "the device has gone\n"); + goto free_int_desc; + } + + /* + * When the higher level interrupt code calls us with + * interrupt disabled, we must poll the channel by calling + * the channel callback directly when channel->target_cpu is + * the current CPU. When the higher level interrupt code + * calls us with interrupt enabled, let's add the + * local_bh_disable()/enable() to avoid race. 
+ */ + local_bh_disable(); + + if (hbus->hdev->channel->target_cpu == smp_processor_id()) + hv_pci_onchannelcallback(hbus); + + local_bh_enable(); + + if (hpdev->state == hv_pcichild_ejecting) { + dev_err_once(&hbus->hdev->device, + "the device is being ejected\n"); + goto free_int_desc; + } + udelay(100); + } if (comp.comp_pkt.completion_status < 0) { dev_err(&hbus->hdev->device, diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 19cd357bb464..ff491da64dab 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -818,7 +818,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) return -EINVAL; chip = &pctrl->chip; - chip->base = -1; + chip->base = 0; chip->ngpio = ngpio; chip->label = dev_name(pctrl->dev); chip->parent = pctrl->dev; diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index 1baf720faf69..87e9747d229a 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -54,7 +54,6 @@ static int ec_response_timed_out(void) static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec, struct cros_ec_command *msg) { - struct ec_host_request *request; struct ec_host_response response; u8 sum; int ret = 0; @@ -65,8 +64,6 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec, /* Write buffer */ cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout); - request = (struct ec_host_request *)ec->dout; - /* Here we go */ sum = EC_COMMAND_PROTOCOL_3; cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_CMD, 1, &sum); diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 36f6190931bc..456ce9f19569 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -51,6 +51,8 @@ struct srp_internal { struct transport_container rport_attr_cont; }; +static int scsi_is_srp_rport(const struct device *dev); + #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t) #define dev_to_rport(d) container_of(d, struct srp_rport, dev) @@ -60,9 +62,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r) return dev_to_shost(r->dev.parent); } +static int find_child_rport(struct device *dev, void *data) +{ + struct device **child = data; + + if (scsi_is_srp_rport(dev)) { + WARN_ON_ONCE(*child); + *child = dev; + } + return 0; +} + static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost) { - return transport_class_to_srp_rport(&shost->shost_gendev); + struct device *child = NULL; + + WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child, + find_child_rport) < 0); + return child ? dev_to_rport(child) : NULL; } /** @@ -600,7 +617,8 @@ enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) struct srp_rport *rport = shost_to_rport(shost); pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); - return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 && + return rport && rport->fast_io_fail_tmo < 0 && + rport->dev_loss_tmo < 0 && i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? 
BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; } diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c index 8d8659463b3e..feeb17cebc25 100644 --- a/drivers/soc/lantiq/gphy.c +++ b/drivers/soc/lantiq/gphy.c @@ -30,7 +30,6 @@ struct xway_gphy_priv { struct clk *gphy_clk_gate; struct reset_control *gphy_reset; struct reset_control *gphy_reset2; - struct notifier_block gphy_reboot_nb; void __iomem *membase; char *fw_name; }; @@ -64,24 +63,6 @@ static const struct of_device_id xway_gphy_match[] = { }; MODULE_DEVICE_TABLE(of, xway_gphy_match); -static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb) -{ - return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb); -} - -static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb, - unsigned long code, void *unused) -{ - struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb); - - if (priv) { - reset_control_assert(priv->gphy_reset); - reset_control_assert(priv->gphy_reset2); - } - - return NOTIFY_DONE; -} - static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv, dma_addr_t *dev_addr) { @@ -205,14 +186,6 @@ static int xway_gphy_probe(struct platform_device *pdev) reset_control_deassert(priv->gphy_reset); reset_control_deassert(priv->gphy_reset2); - /* assert the gphy reset because it can hang after a reboot: */ - priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify; - priv->gphy_reboot_nb.priority = -1; - - ret = register_reboot_notifier(&priv->gphy_reboot_nb); - if (ret) - dev_warn(dev, "Failed to register reboot notifier\n"); - platform_set_drvdata(pdev, priv); return ret; @@ -220,21 +193,12 @@ static int xway_gphy_probe(struct platform_device *pdev) static int xway_gphy_remove(struct platform_device *pdev) { - struct device *dev = &pdev->dev; struct xway_gphy_priv *priv = platform_get_drvdata(pdev); - int ret; - - reset_control_assert(priv->gphy_reset); - reset_control_assert(priv->gphy_reset2); iowrite32be(0, priv->membase); clk_disable_unprepare(priv->gphy_clk_gate); - ret = unregister_reboot_notifier(&priv->gphy_reboot_nb); - if (ret) - dev_warn(dev, "Failed to unregister reboot notifier\n"); - return 0; } diff --git a/fs/aio.c b/fs/aio.c index 4e23958c2509..3a749c3a92e3 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -643,9 +643,8 @@ static void free_ioctx_users(struct percpu_ref *ref) while (!list_empty(&ctx->active_reqs)) { req = list_first_entry(&ctx->active_reqs, struct aio_kiocb, ki_list); - - list_del_init(&req->ki_list); kiocb_cancel(req); + list_del_init(&req->ki_list); } spin_unlock_irq(&ctx->ctx_lock); diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index f965ce832bc0..516e0c57cf9c 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -52,6 +52,23 @@ STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *); STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *, xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *); +/* + * Size of the AGFL. For CRC-enabled filesystes we steal a couple of slots in + * the beginning of the block for a proper header with the location information + * and CRC. 
+ */ +unsigned int +xfs_agfl_size( + struct xfs_mount *mp) +{ + unsigned int size = mp->m_sb.sb_sectsize; + + if (xfs_sb_version_hascrc(&mp->m_sb)) + size -= sizeof(struct xfs_agfl); + + return size / sizeof(xfs_agblock_t); +} + unsigned int xfs_refc_block( struct xfs_mount *mp) @@ -540,7 +557,7 @@ xfs_agfl_verify( if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno) return false; - for (i = 0; i < XFS_AGFL_SIZE(mp); i++) { + for (i = 0; i < xfs_agfl_size(mp); i++) { if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK && be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks) return false; @@ -2039,6 +2056,93 @@ xfs_alloc_space_available( return true; } +/* + * Check the agfl fields of the agf for inconsistency or corruption. The purpose + * is to detect an agfl header padding mismatch between current and early v5 + * kernels. This problem manifests as a 1-slot size difference between the + * on-disk flcount and the active [first, last] range of a wrapped agfl. This + * may also catch variants of agfl count corruption unrelated to padding. Either + * way, we'll reset the agfl and warn the user. + * + * Return true if a reset is required before the agfl can be used, false + * otherwise. + */ +static bool +xfs_agfl_needs_reset( + struct xfs_mount *mp, + struct xfs_agf *agf) +{ + uint32_t f = be32_to_cpu(agf->agf_flfirst); + uint32_t l = be32_to_cpu(agf->agf_fllast); + uint32_t c = be32_to_cpu(agf->agf_flcount); + int agfl_size = xfs_agfl_size(mp); + int active; + + /* no agfl header on v4 supers */ + if (!xfs_sb_version_hascrc(&mp->m_sb)) + return false; + + /* + * The agf read verifier catches severe corruption of these fields. + * Repeat some sanity checks to cover a packed -> unpacked mismatch if + * the verifier allows it. + */ + if (f >= agfl_size || l >= agfl_size) + return true; + if (c > agfl_size) + return true; + + /* + * Check consistency between the on-disk count and the active range. An + * agfl padding mismatch manifests as an inconsistent flcount. + */ + if (c && l >= f) + active = l - f + 1; + else if (c) + active = agfl_size - f + l + 1; + else + active = 0; + + return active != c; +} + +/* + * Reset the agfl to an empty state. Ignore/drop any existing blocks since the + * agfl content cannot be trusted. Warn the user that a repair is required to + * recover leaked blocks. + * + * The purpose of this mechanism is to handle filesystems affected by the agfl + * header padding mismatch problem. A reset keeps the filesystem online with a + * relatively minor free space accounting inconsistency rather than suffer the + * inevitable crash from use of an invalid agfl block. + */ +static void +xfs_agfl_reset( + struct xfs_trans *tp, + struct xfs_buf *agbp, + struct xfs_perag *pag) +{ + struct xfs_mount *mp = tp->t_mountp; + struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); + + ASSERT(pag->pagf_agflreset); + trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_); + + xfs_warn(mp, + "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. " + "Please unmount and run xfs_repair.", + pag->pag_agno, pag->pagf_flcount); + + agf->agf_flfirst = 0; + agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1); + agf->agf_flcount = 0; + xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST | + XFS_AGF_FLCOUNT); + + pag->pagf_flcount = 0; + pag->pagf_agflreset = false; +} + /* * Decide whether to use this allocation group for this allocation. * If so, fix up the btree freelist's size. 
@@ -2100,6 +2204,10 @@ xfs_alloc_fix_freelist( } } + /* reset a padding mismatched agfl before final free space check */ + if (pag->pagf_agflreset) + xfs_agfl_reset(tp, agbp, pag); + /* If there isn't enough total space or single-extent, reject it. */ need = xfs_alloc_min_freelist(mp, pag); if (!xfs_alloc_space_available(args, need, flags)) @@ -2252,10 +2360,11 @@ xfs_alloc_get_freelist( bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]); be32_add_cpu(&agf->agf_flfirst, 1); xfs_trans_brelse(tp, agflbp); - if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp)) + if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp)) agf->agf_flfirst = 0; pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); + ASSERT(!pag->pagf_agflreset); be32_add_cpu(&agf->agf_flcount, -1); xfs_trans_agflist_delta(tp, -1); pag->pagf_flcount--; @@ -2363,10 +2472,11 @@ xfs_alloc_put_freelist( be32_to_cpu(agf->agf_seqno), &agflbp))) return error; be32_add_cpu(&agf->agf_fllast, 1); - if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp)) + if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp)) agf->agf_fllast = 0; pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); + ASSERT(!pag->pagf_agflreset); be32_add_cpu(&agf->agf_flcount, 1); xfs_trans_agflist_delta(tp, 1); pag->pagf_flcount++; @@ -2381,7 +2491,7 @@ xfs_alloc_put_freelist( xfs_alloc_log_agf(tp, agbp, logflags); - ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)); + ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)); agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp); blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)]; @@ -2414,9 +2524,9 @@ xfs_agf_verify( if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) && XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) && be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) && - be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) && - be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) && - be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp))) + be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) && + be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) && + be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp))) return false; if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 || @@ -2572,6 +2682,7 @@ xfs_alloc_read_agf( pag->pagb_count = 0; pag->pagb_tree = RB_ROOT; pag->pagf_init = 1; + pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf); } #ifdef DEBUG else if (!XFS_FORCED_SHUTDOWN(mp)) { diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h index ef26edc2e938..346ba8ab68b5 100644 --- a/fs/xfs/libxfs/xfs_alloc.h +++ b/fs/xfs/libxfs/xfs_alloc.h @@ -26,6 +26,8 @@ struct xfs_trans; extern struct workqueue_struct *xfs_alloc_wq; +unsigned int xfs_agfl_size(struct xfs_mount *mp); + /* * Freespace allocation types. Argument to xfs_alloc_[v]extent. */ diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h index 23229f0c5b15..ed4481b2f113 100644 --- a/fs/xfs/libxfs/xfs_format.h +++ b/fs/xfs/libxfs/xfs_format.h @@ -798,24 +798,13 @@ typedef struct xfs_agi { &(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \ (__be32 *)(bp)->b_addr) -/* - * Size of the AGFL. For CRC-enabled filesystes we steal a couple of - * slots in the beginning of the block for a proper header with the - * location information and CRC. - */ -#define XFS_AGFL_SIZE(mp) \ - (((mp)->m_sb.sb_sectsize - \ - (xfs_sb_version_hascrc(&((mp)->m_sb)) ? 
\ - sizeof(struct xfs_agfl) : 0)) / \ - sizeof(xfs_agblock_t)) - typedef struct xfs_agfl { __be32 agfl_magicnum; __be32 agfl_seqno; uuid_t agfl_uuid; __be64 agfl_lsn; __be32 agfl_crc; - __be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */ + __be32 agfl_bno[]; /* actually xfs_agfl_size(mp) */ } __attribute__((packed)) xfs_agfl_t; #define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc) diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 8f22fc579dbb..40783a313df9 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -294,7 +294,7 @@ xfs_growfs_data_private( } agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp); - for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++) + for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++) agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK); error = xfs_bwrite(bp); diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index e0792d036be2..d359a88ea249 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -353,6 +353,7 @@ typedef struct xfs_perag { char pagi_inodeok; /* The agi is ok for inodes */ uint8_t pagf_levels[XFS_BTNUM_AGF]; /* # of levels in bno & cnt btree */ + bool pagf_agflreset; /* agfl requires reset before use */ uint32_t pagf_flcount; /* count of blocks in freelist */ xfs_extlen_t pagf_freeblks; /* total free blocks */ xfs_extlen_t pagf_longest; /* longest free space */ diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index bb5514688d47..06bc87369632 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -1513,7 +1513,7 @@ TRACE_EVENT(xfs_extent_busy_trim, __entry->tlen) ); -TRACE_EVENT(xfs_agf, +DECLARE_EVENT_CLASS(xfs_agf_class, TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, unsigned long caller_ip), TP_ARGS(mp, agf, flags, caller_ip), @@ -1569,6 +1569,13 @@ TRACE_EVENT(xfs_agf, __entry->longest, (void *)__entry->caller_ip) ); +#define DEFINE_AGF_EVENT(name) \ +DEFINE_EVENT(xfs_agf_class, name, \ + TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, \ + unsigned long caller_ip), \ + TP_ARGS(mp, agf, flags, caller_ip)) +DEFINE_AGF_EVENT(xfs_agf); +DEFINE_AGF_EVENT(xfs_agfl_reset); TRACE_EVENT(xfs_free_extent, TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h index b9e22b7e2f28..d1171db23742 100644 --- a/include/linux/iio/buffer_impl.h +++ b/include/linux/iio/buffer_impl.h @@ -53,7 +53,7 @@ struct iio_buffer_access_funcs { int (*request_update)(struct iio_buffer *buffer); int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); - int (*set_length)(struct iio_buffer *buffer, int length); + int (*set_length)(struct iio_buffer *buffer, unsigned int length); int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); @@ -72,10 +72,10 @@ struct iio_buffer_access_funcs { */ struct iio_buffer { /** @length: Number of datums in buffer. */ - int length; + unsigned int length; /** @bytes_per_datum: Size of individual datum including timestamp. 
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index b9e22b7e2f28..d1171db23742 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -53,7 +53,7 @@ struct iio_buffer_access_funcs {
 	int (*request_update)(struct iio_buffer *buffer);
 
 	int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
-	int (*set_length)(struct iio_buffer *buffer, int length);
+	int (*set_length)(struct iio_buffer *buffer, unsigned int length);
 
 	int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
 	int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
@@ -72,10 +72,10 @@ struct iio_buffer_access_funcs {
 */
 struct iio_buffer {
 	/** @length: Number of datums in buffer. */
-	int length;
+	unsigned int length;
 
 	/** @bytes_per_datum: Size of individual datum including timestamp. */
-	int bytes_per_datum;
+	size_t bytes_per_datum;
 
 	/**
 	 * @access: Buffer access functions associated with the
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index e8418fc77a43..fe322fa611e6 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -334,7 +334,7 @@ struct tcp_sock {
 
 /* Receiver queue space */
 	struct {
-		int	space;
+		u32	space;
 		u32	seq;
 		u64	time;
 	} rcvq_space;
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 3fab6c81917f..f41ea5af22ee 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2604,7 +2604,7 @@ enum nl80211_attrs {
 #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
 #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
 
-#define NL80211_WIPHY_NAME_MAXLEN		128
+#define NL80211_WIPHY_NAME_MAXLEN		64
 
 #define NL80211_MAX_SUPP_RATES			32
 #define NL80211_MAX_SUPP_HT_RATES		77
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 76bcc80b893e..520ecaf61dc4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -894,7 +894,7 @@ int __trace_bputs(unsigned long ip, const char *str)
 EXPORT_SYMBOL_GPL(__trace_bputs);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
-static void tracing_snapshot_instance(struct trace_array *tr)
+void tracing_snapshot_instance(struct trace_array *tr)
 {
 	struct tracer *tracer = tr->current_trace;
 	unsigned long flags;
@@ -950,7 +950,7 @@ static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
 					struct trace_buffer *size_buf, int cpu_id);
 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
 
-static int alloc_snapshot(struct trace_array *tr)
+int tracing_alloc_snapshot_instance(struct trace_array *tr)
 {
 	int ret;
 
@@ -996,7 +996,7 @@ int tracing_alloc_snapshot(void)
 	struct trace_array *tr = &global_trace;
 	int ret;
 
-	ret = alloc_snapshot(tr);
+	ret = tracing_alloc_snapshot_instance(tr);
 	WARN_ON(ret < 0);
 
 	return ret;
@@ -5400,7 +5400,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	if (t->use_max_tr && !had_max_tr) {
-		ret = alloc_snapshot(tr);
+		ret = tracing_alloc_snapshot_instance(tr);
 		if (ret < 0)
 			goto out;
 	}
@@ -6378,7 +6378,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		}
 #endif
 		if (!tr->allocated_snapshot) {
-			ret = alloc_snapshot(tr);
+			ret = tracing_alloc_snapshot_instance(tr);
 			if (ret < 0)
 				break;
 		}
@@ -7099,7 +7099,7 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
 	return ret;
 
 out_reg:
-	ret = alloc_snapshot(tr);
+	ret = tracing_alloc_snapshot_instance(tr);
 	if (ret < 0)
 		goto out;
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 401b0639116f..851cd1605085 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1807,6 +1807,17 @@ static inline void __init trace_event_init(void) { }
 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
 #endif
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+void tracing_snapshot_instance(struct trace_array *tr);
+int tracing_alloc_snapshot_instance(struct trace_array *tr);
+#else
+static inline void tracing_snapshot_instance(struct trace_array *tr) { }
+static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
+{
+	return 0;
+}
+#endif
+
 extern struct trace_iterator *tracepoint_print_iter;
 
 #endif /* _LINUX_KERNEL_TRACE_H */
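The trace.h hunk above exports the per-instance snapshot helpers; the trigger changes that follow call them in this order. A simplified sketch with hypothetical example_register()/example_hit() wrappers (not a kernel API; error handling and unregistration on failure are omitted here but present in the real trigger code below):

/*
 * Sketch only: allocate the per-instance snapshot buffer once when the
 * trigger is registered, then snapshot that instance from the hot path.
 */
static int example_register(struct trace_event_file *file)
{
	/* allocate the snapshot (max) buffer for this instance only */
	return tracing_alloc_snapshot_instance(file->tr);
}

static void example_hit(struct trace_event_file *file)
{
	/* capture into the snapshot buffer of file->tr, not the global one */
	tracing_snapshot_instance(file->tr);
}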
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index f2ac9d44f6c4..b413fab7d75b 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -482,9 +482,10 @@ clear_event_triggers(struct trace_array *tr)
 	struct trace_event_file *file;
 
 	list_for_each_entry(file, &tr->events, list) {
-		struct event_trigger_data *data;
-		list_for_each_entry_rcu(data, &file->triggers, list) {
+		struct event_trigger_data *data, *n;
+		list_for_each_entry_safe(data, n, &file->triggers, list) {
 			trace_event_trigger_enable_disable(file, 0);
+			list_del_rcu(&data->list);
 			if (data->ops->free)
 				data->ops->free(data->ops, data);
 		}
@@ -641,6 +642,7 @@ event_trigger_callback(struct event_command *cmd_ops,
 	trigger_data->count = -1;
 	trigger_data->ops = trigger_ops;
 	trigger_data->cmd_ops = cmd_ops;
+	trigger_data->private_data = file;
 	INIT_LIST_HEAD(&trigger_data->list);
 	INIT_LIST_HEAD(&trigger_data->named_list);
 
@@ -1041,7 +1043,12 @@ static struct event_command trigger_traceoff_cmd = {
 static void
 snapshot_trigger(struct event_trigger_data *data, void *rec)
 {
-	tracing_snapshot();
+	struct trace_event_file *file = data->private_data;
+
+	if (file)
+		tracing_snapshot_instance(file->tr);
+	else
+		tracing_snapshot();
 }
 
 static void
@@ -1063,7 +1070,7 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
 {
 	int ret = register_trigger(glob, ops, data, file);
 
-	if (ret > 0 && tracing_alloc_snapshot() != 0) {
+	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
 		unregister_trigger(glob, ops, data, file);
 		ret = 0;
 	}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e774898c91d5..8af604f3b370 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2388,7 +2388,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 		__split_huge_page_tail(head, i, lruvec, list);
 		/* Some pages can be beyond i_size: drop them from page cache */
 		if (head[i].index >= end) {
-			__ClearPageDirty(head + i);
+			ClearPageDirty(head + i);
 			__delete_from_page_cache(head + i, NULL);
 			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
 				shmem_uncharge(head->mapping->host, 1);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1a581468a9cf..be56e2e1931e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1451,7 +1451,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
 			return ret;
 
 		mapping = page_mapping(page);
-		migrate_dirty = mapping && mapping->a_ops->migratepage;
+		migrate_dirty = !mapping || mapping->a_ops->migratepage;
 		unlock_page(page);
 		if (!migrate_dirty)
 			return ret;
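The vmscan.c one-liner above fixes the dirty-page check in __isolate_lru_page(): a dirty page is treated as safe to isolate for async migration when page_mapping() returned NULL, or when the mapping provides a migratepage callback; the old expression wrongly rejected the NULL-mapping case. A hedged restatement of the corrected predicate as a standalone helper, with the structures reduced to the bare minimum:

#include <stdbool.h>
#include <stddef.h>

/* Minimal stand-ins for the kernel structures used by the check. */
struct address_space_ops { int (*migratepage)(void); };
struct address_space    { const struct address_space_ops *a_ops; };

/*
 * Old check:  mapping && mapping->a_ops->migratepage
 *             (a page without a mapping was never considered migratable)
 * New check:  !mapping || mapping->a_ops->migratepage
 */
static bool migrate_dirty_ok(const struct address_space *mapping)
{
	return !mapping || mapping->a_ops->migratepage != NULL;
}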
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ebbb54bcbcac..125b49c166a4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -591,8 +591,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 void tcp_rcv_space_adjust(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	u32 copied;
 	int time;
-	int copied;
 
 	tcp_mstamp_refresh(tp);
 	time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
@@ -615,12 +615,13 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
 	if (sysctl_tcp_moderate_rcvbuf &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-		int rcvwin, rcvmem, rcvbuf;
+		int rcvmem, rcvbuf;
+		u64 rcvwin;
 
 		/* minimal window to cope with packet losses, assuming
 		 * steady state. Add some cushion because of small variations.
 		 */
-		rcvwin = (copied << 1) + 16 * tp->advmss;
+		rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
 
 		/* If rate increased by 25%,
 		 *	assume slow start, rcvwin = 3 * copied
@@ -640,7 +641,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
 		while (tcp_win_from_space(rcvmem) < tp->advmss)
 			rcvmem += 128;
 
-		rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
+		do_div(rcvwin, tp->advmss);
+		rcvbuf = min_t(u64, rcvwin * rcvmem, sysctl_tcp_rmem[2]);
 		if (rcvbuf > sk->sk_rcvbuf) {
 			sk->sk_rcvbuf = rcvbuf;
 
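The tcp_input.c change above keeps rcvwin in 64 bits because (copied << 1) + 16 * advmss, divided by advmss and multiplied by rcvmem, can exceed the range of a signed 32-bit int on large-BDP connections before it is clamped to sysctl_tcp_rmem[2]. A standalone illustration of the magnitude involved; all values are arbitrary examples, not taken from the patch:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* arbitrary example values for a fast, large-BDP connection */
	uint64_t copied = 400ull << 20;	/* ~400 MB delivered per RTT */
	uint64_t advmss = 1460;
	uint64_t rcvmem = 4480;		/* example per-MSS skb cost */

	/* the patched code keeps this in 64 bits and clamps at the end */
	uint64_t rcvwin = (copied << 1) + 16 * advmss;
	uint64_t rcvbuf = rcvwin / advmss * rcvmem;

	printf("rcvbuf = %" PRIu64 " bytes\n", rcvbuf);
	printf("fits in a signed 32-bit int? %s\n",
	       rcvbuf <= INT32_MAX ? "yes" : "no (old int math would overflow)");
	return 0;
}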
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index c9c031e3d1ae..b275743e23cc 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1448,7 +1448,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
 				      scontext_len, &context, def_sid);
 	if (rc == -EINVAL && force) {
 		context.str = str;
-		context.len = scontext_len;
+		context.len = strlen(str) + 1;
 		str = NULL;
 	} else if (rc)
 		goto out_unlock;
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index a086c35f91bb..79a9fdf94d38 100644
--- a/sound/soc/intel/common/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -274,7 +274,6 @@ int sst_dma_new(struct sst_dsp *sst)
 	struct sst_pdata *sst_pdata = sst->pdata;
 	struct sst_dma *dma;
 	struct resource mem;
-	const char *dma_dev_name;
 	int ret = 0;
 
 	if (sst->pdata->resindex_dma_base == -1)
@@ -285,7 +284,6 @@ int sst_dma_new(struct sst_dsp *sst)
 	 * is attached to the ADSP IP. */
 	switch (sst->pdata->dma_engine) {
 	case SST_DMA_TYPE_DW:
-		dma_dev_name = "dw_dmac";
 		break;
 	default:
 		dev_err(sst->dev, "error: invalid DMA engine %d\n",
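The objtool changes that follow teach the tool about functions GCC splits into a hot part and a cold part named like parent.cold.N; the elf.c hunk near the end pairs them by temporarily truncating the symbol name at ".cold.". A standalone sketch of that name-matching idea (simplified: it copies the name instead of patching it in place, and the symbol names are made up):

#include <stdio.h>
#include <string.h>

/* Return the parent function name for a "foo.cold.3"-style symbol,
 * or the name itself if it is not a cold subfunction. */
static void parent_name(const char *sym, char *buf, size_t buflen)
{
	const char *coldstr = strstr(sym, ".cold.");
	size_t len = coldstr ? (size_t)(coldstr - sym) : strlen(sym);

	if (len >= buflen)
		len = buflen - 1;
	memcpy(buf, sym, len);
	buf[len] = '\0';
}

int main(void)
{
	char buf[64];

	parent_name("try_to_unmap_one.cold.12", buf, sizeof(buf));
	printf("%s\n", buf);		/* try_to_unmap_one */
	parent_name("main", buf, sizeof(buf));
	printf("%s\n", buf);		/* main */
	return 0;
}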
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index c8b8b7101c6f..e128d1c71c30 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -59,6 +59,31 @@ static struct instruction *next_insn_same_sec(struct objtool_file *file,
 	return next;
 }
 
+static struct instruction *next_insn_same_func(struct objtool_file *file,
+					       struct instruction *insn)
+{
+	struct instruction *next = list_next_entry(insn, list);
+	struct symbol *func = insn->func;
+
+	if (!func)
+		return NULL;
+
+	if (&next->list != &file->insn_list && next->func == func)
+		return next;
+
+	/* Check if we're already in the subfunction: */
+	if (func == func->cfunc)
+		return NULL;
+
+	/* Move to the subfunction: */
+	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
+}
+
+#define func_for_each_insn_all(file, func, insn)			\
+	for (insn = find_insn(file, func->sec, func->offset);		\
+	     insn;							\
+	     insn = next_insn_same_func(file, insn))
+
 #define func_for_each_insn(file, func, insn)				\
 	for (insn = find_insn(file, func->sec, func->offset);		\
 	     insn && &insn->list != &file->insn_list &&			\
@@ -148,10 +173,14 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
 		if (!strcmp(func->name, global_noreturns[i]))
 			return 1;
 
-	if (!func->sec)
+	if (!func->len)
 		return 0;
 
-	func_for_each_insn(file, func, insn) {
+	insn = find_insn(file, func->sec, func->offset);
+	if (!insn->func)
+		return 0;
+
+	func_for_each_insn_all(file, func, insn) {
 		empty = false;
 
 		if (insn->type == INSN_RETURN)
@@ -166,35 +195,28 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
 	 * case, the function's dead-end status depends on whether the target
 	 * of the sibling call returns.
 	 */
-	func_for_each_insn(file, func, insn) {
-		if (insn->sec != func->sec ||
-		    insn->offset >= func->offset + func->len)
-			break;
-
+	func_for_each_insn_all(file, func, insn) {
 		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
 			struct instruction *dest = insn->jump_dest;
-			struct symbol *dest_func;
 
 			if (!dest)
 				/* sibling call to another file */
 				return 0;
 
-			if (dest->sec != func->sec ||
-			    dest->offset < func->offset ||
-			    dest->offset >= func->offset + func->len) {
-				/* local sibling call */
-				dest_func = find_symbol_by_offset(dest->sec,
-								  dest->offset);
-				if (!dest_func)
-					continue;
+			if (dest->func && dest->func->pfunc != insn->func->pfunc) {
 
+				/* local sibling call */
 				if (recursion == 5) {
-					WARN_FUNC("infinite recursion (objtool bug!)",
-						  dest->sec, dest->offset);
-					return -1;
+					/*
+					 * Infinite recursion: two functions
+					 * have sibling calls to each other.
+					 * This is a very rare case.  It means
+					 * they aren't dead ends.
+					 */
+					return 0;
 				}
 
-				return __dead_end_function(file, dest_func,
+				return __dead_end_function(file, dest->func,
 							   recursion + 1);
 			}
 		}
@@ -421,7 +443,7 @@ static void add_ignores(struct objtool_file *file)
 			if (!ignore_func(file, func))
 				continue;
 
-			func_for_each_insn(file, func, insn)
+			func_for_each_insn_all(file, func, insn)
 				insn->ignore = true;
 		}
 	}
@@ -781,30 +803,35 @@ static int add_special_section_alts(struct objtool_file *file)
 	return ret;
 }
 
-static int add_switch_table(struct objtool_file *file, struct symbol *func,
-			    struct instruction *insn, struct rela *table,
-			    struct rela *next_table)
+static int add_switch_table(struct objtool_file *file, struct instruction *insn,
+			    struct rela *table, struct rela *next_table)
 {
 	struct rela *rela = table;
 	struct instruction *alt_insn;
 	struct alternative *alt;
+	struct symbol *pfunc = insn->func->pfunc;
+	unsigned int prev_offset = 0;
 
 	list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
 		if (rela == next_table)
 			break;
 
-		if (rela->sym->sec != insn->sec ||
-		    rela->addend <= func->offset ||
-		    rela->addend >= func->offset + func->len)
+		/* Make sure the switch table entries are consecutive: */
+		if (prev_offset && rela->offset != prev_offset + 8)
 			break;
 
-		alt_insn = find_insn(file, insn->sec, rela->addend);
-		if (!alt_insn) {
-			WARN("%s: can't find instruction at %s+0x%x",
-			     file->rodata->rela->name, insn->sec->name,
-			     rela->addend);
-			return -1;
-		}
+		/* Detect function pointers from contiguous objects: */
+		if (rela->sym->sec == pfunc->sec &&
+		    rela->addend == pfunc->offset)
+			break;
+
+		alt_insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (!alt_insn)
+			break;
+
+		/* Make sure the jmp dest is in the function or subfunction: */
+		if (alt_insn->func->pfunc != pfunc)
+			break;
 
 		alt = malloc(sizeof(*alt));
 		if (!alt) {
@@ -814,6 +841,13 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func,
 
 		alt->insn = alt_insn;
 		list_add_tail(&alt->list, &insn->alts);
+		prev_offset = rela->offset;
+	}
+
+	if (!prev_offset) {
+		WARN_FUNC("can't find switch jump table",
+			  insn->sec, insn->offset);
+		return -1;
 	}
 
 	return 0;
@@ -868,40 +902,21 @@ static struct rela *find_switch_table(struct objtool_file *file,
 {
 	struct rela *text_rela, *rodata_rela;
 	struct instruction *orig_insn = insn;
+	unsigned long table_offset;
 
-	text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
-	if (text_rela && text_rela->sym == file->rodata->sym) {
-		/* case 1 */
-		rodata_rela = find_rela_by_dest(file->rodata,
-						text_rela->addend);
-		if (rodata_rela)
-			return rodata_rela;
-
-		/* case 2 */
-		rodata_rela = find_rela_by_dest(file->rodata,
-						text_rela->addend + 4);
-		if (!rodata_rela)
-			return NULL;
-
-		file->ignore_unreachables = true;
-		return rodata_rela;
-	}
-
-	/* case 3 */
 	/*
 	 * Backward search using the @first_jump_src links, these help avoid
 	 * much of the 'in between' code. Which avoids us getting confused by
 	 * it.
 	 */
-	for (insn = list_prev_entry(insn, list);
-
+	for (;
 	     &insn->list != &file->insn_list &&
 	     insn->sec == func->sec &&
 	     insn->offset >= func->offset;
 	     insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
 
-		if (insn->type == INSN_JUMP_DYNAMIC)
+		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
 			break;
 
 		/* allow small jumps within the range */
@@ -917,18 +932,29 @@ static struct rela *find_switch_table(struct objtool_file *file,
 		if (!text_rela || text_rela->sym != file->rodata->sym)
 			continue;
 
+		table_offset = text_rela->addend;
+		if (text_rela->type == R_X86_64_PC32)
+			table_offset += 4;
+
 		/*
 		 * Make sure the .rodata address isn't associated with a
 		 * symbol. gcc jump tables are anonymous data.
 		 */
-		if (find_symbol_containing(file->rodata, text_rela->addend))
+		if (find_symbol_containing(file->rodata, table_offset))
 			continue;
 
-		rodata_rela = find_rela_by_dest(file->rodata, text_rela->addend);
-		if (!rodata_rela)
-			continue;
+		rodata_rela = find_rela_by_dest(file->rodata, table_offset);
+		if (rodata_rela) {
+			/*
+			 * Use of RIP-relative switch jumps is quite rare, and
+			 * indicates a rare GCC quirk/bug which can leave dead
+			 * code behind.
+			 */
+			if (text_rela->type == R_X86_64_PC32)
+				file->ignore_unreachables = true;
 
-		return rodata_rela;
+			return rodata_rela;
+		}
 	}
 
 	return NULL;
@@ -942,7 +968,7 @@ static int add_func_switch_tables(struct objtool_file *file,
 	struct rela *rela, *prev_rela = NULL;
 	int ret;
 
-	func_for_each_insn(file, func, insn) {
+	func_for_each_insn_all(file, func, insn) {
 		if (!last)
 			last = insn;
 
@@ -973,8 +999,7 @@ static int add_func_switch_tables(struct objtool_file *file,
 		 * the beginning of another switch table in the same function.
 		 */
 		if (prev_jump) {
-			ret = add_switch_table(file, func, prev_jump, prev_rela,
-					       rela);
+			ret = add_switch_table(file, prev_jump, prev_rela, rela);
 			if (ret)
 				return ret;
 		}
@@ -984,7 +1009,7 @@ static int add_func_switch_tables(struct objtool_file *file,
 	}
 
 	if (prev_jump) {
-		ret = add_switch_table(file, func, prev_jump, prev_rela, NULL);
+		ret = add_switch_table(file, prev_jump, prev_rela, NULL);
 		if (ret)
 			return ret;
 	}
@@ -1748,15 +1773,13 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 
 	while (1) {
 		next_insn = next_insn_same_sec(file, insn);
-
-		if (file->c_file && func && insn->func && func != insn->func) {
+		if (file->c_file && func && insn->func && func != insn->func->pfunc) {
 			WARN("%s() falls through to next function %s()",
 			     func->name, insn->func->name);
 			return 1;
 		}
 
-		if (insn->func)
-			func = insn->func;
+		func = insn->func ? insn->func->pfunc : NULL;
 
 		if (func && insn->ignore) {
 			WARN_FUNC("BUG: why am I validating an ignored function?",
@@ -1777,7 +1800,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 
 			i = insn;
 			save_insn = NULL;
-			func_for_each_insn_continue_reverse(file, func, i) {
+			func_for_each_insn_continue_reverse(file, insn->func, i) {
 				if (i->save) {
 					save_insn = i;
 					break;
@@ -1864,7 +1887,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 		case INSN_JUMP_UNCONDITIONAL:
 			if (insn->jump_dest &&
 			    (!func || !insn->jump_dest->func ||
-			     func == insn->jump_dest->func)) {
+			     insn->jump_dest->func->pfunc == func)) {
 				ret = validate_branch(file, insn->jump_dest,
 						      state);
 				if (ret)
@@ -2059,7 +2082,7 @@ static int validate_functions(struct objtool_file *file)
 
 	for_each_sec(file, sec) {
 		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
+			if (func->type != STT_FUNC || func->pfunc != func)
 				continue;
 
 			insn = find_insn(file, sec, func->offset);
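In add_switch_table() above, a jump table is now delimited by requiring each .rodata relocation to sit exactly 8 bytes after the previous one, since x86-64 switch tables are dense arrays of 8-byte pointers; the first gap (or a pointer back to the parent function) ends the table. A hedged standalone sketch of that cut-off rule, with a plain array of offsets standing in for the rela list:

#include <stdio.h>

/* Count how many relocation offsets form one dense 8-byte-stride table,
 * mirroring the "rela->offset != prev_offset + 8" cut-off used above. */
static unsigned int table_entries(const unsigned long *offsets, unsigned int nr)
{
	unsigned long prev_offset = 0;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		if (prev_offset && offsets[i] != prev_offset + 8)
			break;
		prev_offset = offsets[i];
	}
	return i;
}

int main(void)
{
	/* two 8-byte steps, then a gap: the gap starts a different object */
	const unsigned long offsets[] = { 0x40, 0x48, 0x50, 0x68, 0x70 };

	printf("%u\n", table_entries(offsets, 5));	/* prints 3 */
	return 0;
}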
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index c1c338661699..4e60e105583e 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -79,6 +79,19 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
 	return NULL;
 }
 
+struct symbol *find_symbol_by_name(struct elf *elf, const char *name)
+{
+	struct section *sec;
+	struct symbol *sym;
+
+	list_for_each_entry(sec, &elf->sections, list)
+		list_for_each_entry(sym, &sec->symbol_list, list)
+			if (!strcmp(sym->name, name))
+				return sym;
+
+	return NULL;
+}
+
 struct symbol *find_symbol_containing(struct section *sec, unsigned long offset)
 {
 	struct symbol *sym;
@@ -203,10 +216,11 @@ static int read_sections(struct elf *elf)
 
 static int read_symbols(struct elf *elf)
 {
-	struct section *symtab;
-	struct symbol *sym;
+	struct section *symtab, *sec;
+	struct symbol *sym, *pfunc;
 	struct list_head *entry, *tmp;
 	int symbols_nr, i;
+	char *coldstr;
 
 	symtab = find_section_by_name(elf, ".symtab");
 	if (!symtab) {
@@ -281,6 +295,30 @@ static int read_symbols(struct elf *elf)
 		hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx);
 	}
 
+	/* Create parent/child links for any cold subfunctions */
+	list_for_each_entry(sec, &elf->sections, list) {
+		list_for_each_entry(sym, &sec->symbol_list, list) {
+			if (sym->type != STT_FUNC)
+				continue;
+			sym->pfunc = sym->cfunc = sym;
+			coldstr = strstr(sym->name, ".cold.");
+			if (coldstr) {
+				coldstr[0] = '\0';
+				pfunc = find_symbol_by_name(elf, sym->name);
+				coldstr[0] = '.';
+
+				if (!pfunc) {
+					WARN("%s(): can't find parent function",
+					     sym->name);
+					goto err;
+				}
+
+				sym->pfunc = pfunc;
+				pfunc->cfunc = sym;
+			}
+		}
+	}
+
 	return 0;
 
 err:
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index d86e2ff14466..de5cd2ddded9 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -61,6 +61,7 @@ struct symbol {
 	unsigned char bind, type;
 	unsigned long offset;
 	unsigned int len;
+	struct symbol *pfunc, *cfunc;
 };
 
 struct rela {
@@ -86,6 +87,7 @@ struct elf {
 struct elf *elf_open(const char *name, int flags);
 struct section *find_section_by_name(struct elf *elf, const char *name);
 struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
+struct symbol *find_symbol_by_name(struct elf *elf, const char *name);
 struct symbol *find_symbol_containing(struct section *sec, unsigned long offset);
 struct rela *find_rela_by_dest(struct section *sec, unsigned long offset);
 struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,