diff --git a/0001-apply-preempt-RT-patch.patch b/0001-apply-preempt-RT-patch.patch index 053afc5..865b41f 100644 --- a/0001-apply-preempt-RT-patch.patch +++ b/0001-apply-preempt-RT-patch.patch @@ -1,7 +1,7 @@ -From 069d7f9ad6878ef306e0e15efb50cede4e996d0a Mon Sep 17 00:00:00 2001 +From b9f37e1ab5342d6e20ebf05c092c63fac67a0739 Mon Sep 17 00:00:00 2001 From: zhangyu -Date: Tue, 31 Oct 2023 14:16:34 +0800 -Subject: [PATCH] apply-preempt-RT-patch +Date: Thu, 14 Dec 2023 11:16:10 +0800 +Subject: [PATCH] apply-preempt-RT-patch.patch.patch --- .../Expedited-Grace-Periods.rst | 4 +- @@ -43,7 +43,7 @@ Subject: [PATCH] apply-preempt-RT-patch arch/arm64/include/asm/thread_info.h | 8 +- arch/arm64/kernel/asm-offsets.c | 1 + arch/arm64/kernel/entry.S | 13 +- - arch/arm64/kernel/fpsimd.c | 14 +- + arch/arm64/kernel/fpsimd.c | 13 +- arch/arm64/kernel/ipi_nmi.c | 2 - arch/arm64/kernel/signal.c | 2 +- arch/arm64/kvm/arm.c | 6 +- @@ -184,6 +184,7 @@ Subject: [PATCH] apply-preempt-RT-patch drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 30 +- drivers/hv/hyperv_vmbus.h | 1 + drivers/hv/vmbus_drv.c | 8 +- + drivers/irqchip/irq-gic-v4.c | 2 +- drivers/leds/trigger/Kconfig | 1 + drivers/md/raid5.c | 7 +- drivers/md/raid5.h | 1 + @@ -226,7 +227,7 @@ Subject: [PATCH] apply-preempt-RT-patch fs/nfs/dir.c | 4 +- fs/nfs/unlink.c | 4 +- fs/proc/array.c | 4 +- - fs/proc/base.c | 3 +- + fs/proc/base.c | 4 +- fs/proc/proc_sysctl.c | 2 +- fs/pstore/platform.c | 5 +- include/asm-generic/Kbuild | 2 +- @@ -246,8 +247,8 @@ Subject: [PATCH] apply-preempt-RT-patch include/linux/eventfd.h | 11 +- include/linux/fs.h | 2 +- include/linux/hardirq.h | 7 +- - include/linux/highmem-internal.h | 222 +++ - include/linux/highmem.h | 294 ++- + include/linux/highmem-internal.h | 222 ++ + include/linux/highmem.h | 294 +-- include/linux/interrupt.h | 34 +- include/linux/io-mapping.h | 28 +- include/linux/irq_cpustat.h | 28 - @@ -269,12 +270,12 @@ Subject: [PATCH] apply-preempt-RT-patch include/linux/rbtree_type.h | 31 + include/linux/rcupdate.h | 10 +- include/linux/rtmutex.h | 46 +- - include/linux/rwlock_rt.h | 109 ++ + include/linux/rwlock_rt.h | 109 + include/linux/rwlock_types.h | 4 + include/linux/rwlock_types_rt.h | 56 + include/linux/rwsem-rt.h | 70 + include/linux/rwsem.h | 12 + - include/linux/sched.h | 123 +- + include/linux/sched.h | 127 +- include/linux/sched/hotplug.h | 2 + include/linux/sched/mm.h | 11 + include/linux/sched/rt.h | 8 - @@ -325,13 +326,13 @@ Subject: [PATCH] apply-preempt-RT-patch kernel/kthread.c | 16 +- kernel/locking/Makefile | 10 +- kernel/locking/lockdep.c | 2 + - kernel/locking/mutex-rt.c | 224 +++ + kernel/locking/mutex-rt.c | 224 ++ kernel/locking/rtmutex-debug.c | 102 - kernel/locking/rtmutex-debug.h | 11 - kernel/locking/rtmutex.c | 939 +++++++-- kernel/locking/rtmutex.h | 7 - kernel/locking/rtmutex_common.h | 36 +- - kernel/locking/rwlock-rt.c | 334 ++++ + kernel/locking/rwlock-rt.c | 334 +++ kernel/locking/rwsem-rt.c | 317 +++ kernel/locking/rwsem.c | 6 + kernel/locking/spinlock.c | 7 + @@ -340,7 +341,7 @@ Subject: [PATCH] apply-preempt-RT-patch kernel/panic.c | 32 +- kernel/printk/Makefile | 1 - kernel/printk/internal.h | 4 - - kernel/printk/printk.c | 1708 +++++++++-------- + kernel/printk/printk.c | 1797 +++++++++-------- kernel/printk/printk_safe.c | 349 +--- kernel/ptrace.c | 32 +- kernel/rcu/Kconfig | 4 +- @@ -358,7 +359,7 @@ Subject: [PATCH] apply-preempt-RT-patch kernel/sched/swait.c | 1 + kernel/signal.c | 105 +- kernel/smp.c | 14 +- - kernel/softirq.c | 431 ++++- + kernel/softirq.c | 431 +++- 
kernel/stop_machine.c | 27 +- kernel/time/hrtimer.c | 30 + kernel/time/tick-sched.c | 2 +- @@ -371,7 +372,7 @@ Subject: [PATCH] apply-preempt-RT-patch lib/Kconfig.debug | 2 +- lib/bug.c | 1 + lib/cpumask.c | 18 + - lib/debugobjects.c | 5 +- + lib/debugobjects.c | 4 +- lib/dump_stack.c | 2 + lib/irq_poll.c | 5 + lib/locking-selftest.c | 51 + @@ -401,7 +402,7 @@ Subject: [PATCH] apply-preempt-RT-patch net/sched/sch_generic.c | 10 + net/sunrpc/svc_xprt.c | 4 +- net/xfrm/xfrm_state.c | 3 +- - 397 files changed, 8930 insertions(+), 4789 deletions(-) + 398 files changed, 8964 insertions(+), 4849 deletions(-) delete mode 100644 arch/alpha/include/asm/kmap_types.h delete mode 100644 arch/arc/include/asm/kmap_types.h delete mode 100644 arch/arm/include/asm/kmap_types.h @@ -597,7 +598,7 @@ index f64f4413a..3b4a24877 100644 Therefore, on_each_cpu() disables preemption across its call to smp_call_function() and also across the local call to diff --git a/Documentation/RCU/stallwarn.rst b/Documentation/RCU/stallwarn.rst -index c9ab6af4d..e97d1b487 100644 +index d53856cc6..7148e9be0 100644 --- a/Documentation/RCU/stallwarn.rst +++ b/Documentation/RCU/stallwarn.rst @@ -25,7 +25,7 @@ warnings: @@ -663,7 +664,7 @@ index fb3ff76c3..3b2b1479f 100644 read-side critical sections. It also permits spinlocks blocking while in RCU read-side critical diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt -index 71567aa7e..ee4911a95 100644 +index a3feab139..50ab4d713 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4397,6 +4397,10 @@ @@ -677,7 +678,7 @@ index 71567aa7e..ee4911a95 100644 rcutree.rcu_fanout_exact= [KNL] Disable autobalancing of the rcu_node combining tree. This is used by rcutorture, and might -@@ -4775,6 +4779,13 @@ +@@ -4781,6 +4785,13 @@ only normal grace-period primitives. No effect on CONFIG_TINY_RCU kernels. @@ -814,7 +815,7 @@ index a966239f0..a7830c594 100644 -performs an IPI to inform all processors about the new mapping. This results -in a significant performance penalty. diff --git a/arch/Kconfig b/arch/Kconfig -index b0319fa3c..32694b49d 100644 +index 0fc9c6d59..0a371be43 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -50,6 +50,7 @@ config OPROFILE @@ -825,7 +826,7 @@ index b0319fa3c..32694b49d 100644 select RING_BUFFER select RING_BUFFER_ALLOW_SWAP help -@@ -683,6 +684,12 @@ config HAVE_TIF_NOHZ +@@ -686,6 +687,12 @@ config HAVE_TIF_NOHZ config HAVE_VIRT_CPU_ACCOUNTING bool @@ -838,7 +839,7 @@ index b0319fa3c..32694b49d 100644 config ARCH_HAS_SCALED_CPUTIME bool -@@ -697,7 +704,6 @@ config HAVE_VIRT_CPU_ACCOUNTING_GEN +@@ -700,7 +707,6 @@ config HAVE_VIRT_CPU_ACCOUNTING_GEN some 32-bit arches may require multiple accesses, so proper locking is needed to protect against concurrent accesses. 
@@ -1048,10 +1049,10 @@ index 1b9f473c6..c79912a6b 100644 + alloc_kmap_pgtable(FIXMAP_BASE); } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index 400d53736..220c116cd 100644 +index 370b9048e..0b80f42f5 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig -@@ -31,6 +31,7 @@ config ARM +@@ -32,6 +32,7 @@ config ARM select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7 select ARCH_SUPPORTS_ATOMIC_RMW @@ -1059,7 +1060,7 @@ index 400d53736..220c116cd 100644 select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU -@@ -66,7 +67,7 @@ config ARM +@@ -67,7 +68,7 @@ config ARM select HARDIRQS_SW_RESEND select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 @@ -1068,7 +1069,7 @@ index 400d53736..220c116cd 100644 select HAVE_ARCH_KFENCE if MMU select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL -@@ -109,6 +110,7 @@ config ARM +@@ -110,6 +111,7 @@ config ARM select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP @@ -1076,7 +1077,7 @@ index 400d53736..220c116cd 100644 select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RSEQ -@@ -124,6 +126,7 @@ config ARM +@@ -125,6 +127,7 @@ config ARM select OLD_SIGSUSPEND3 select PCI_SYSCALL if PCI select PERF_USE_VMALLOC @@ -1084,7 +1085,7 @@ index 400d53736..220c116cd 100644 select RTC_LIB select SET_FS select SYS_SUPPORTS_APM_EMULATION -@@ -1520,6 +1523,7 @@ config HAVE_ARCH_PFN_VALID +@@ -1521,6 +1524,7 @@ config HAVE_ARCH_PFN_VALID config HIGHMEM bool "High Memory Support" depends on MMU @@ -1556,7 +1557,7 @@ index 187fab227..000000000 - return (void *)vaddr; -} diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index 30c747321..9b3118068 100644 +index 851d24a35..649da0920 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -78,6 +78,7 @@ config ARM64 @@ -1666,7 +1667,7 @@ index 18782f0c4..6672b0535 100644 #include diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h -index dd8d27ea7..6464a3224 100644 +index 390d96125..531420502 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -27,6 +27,7 @@ struct thread_info { @@ -1685,7 +1686,7 @@ index dd8d27ea7..6464a3224 100644 #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ -@@ -102,6 +104,7 @@ void arch_release_task_struct(struct task_struct *tsk); +@@ -104,6 +106,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_SVE (1 << TIF_SVE) #define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT) @@ -1693,7 +1694,7 @@ index dd8d27ea7..6464a3224 100644 #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_32BIT_AARCH64 (1 << TIF_32BIT_AARCH64) #define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) -@@ -109,9 +112,12 @@ void arch_release_task_struct(struct task_struct *tsk); +@@ -111,9 +114,12 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ @@ -1720,10 +1721,10 @@ index 5f59e24c9..4f522206c 100644 DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); #endif diff --git a/arch/arm64/kernel/entry.S 
b/arch/arm64/kernel/entry.S -index 64145bfab..4cdbba720 100644 +index 1c9e7ce86..c13562842 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S -@@ -521,9 +521,18 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING +@@ -517,9 +517,18 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING orr x24, x24, x0 alternative_else_nop_endif #endif @@ -1745,12 +1746,12 @@ index 64145bfab..4cdbba720 100644 mov x0, sp diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c -index 5335a6bd1..84520f116 100644 +index c2489a72b..131b2dda3 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c -@@ -226,6 +226,16 @@ static void sve_free(struct task_struct *task) - __sve_free(task); - } +@@ -313,6 +313,15 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type, + * may disable TIF_SME and reenable traps. + */ +static void *sve_free_atomic(struct task_struct *task) +{ @@ -1761,32 +1762,31 @@ index 5335a6bd1..84520f116 100644 + task->thread.sve_state = NULL; + return sve_state; +} -+ + /* * TIF_SVE controls whether a task can use SVE without trapping while - * in userspace, and also the way a task's FPSIMD/SVE state is stored -@@ -1022,6 +1032,7 @@ void fpsimd_thread_switch(struct task_struct *next) +@@ -1556,6 +1565,7 @@ static void fpsimd_flush_thread_vl(enum vec_type type) + void fpsimd_flush_thread(void) { - int vl, supported_vl; -+ void *mem = NULL; - ++ void *mem = NULL; if (!system_supports_fpsimd()) return; -@@ -1034,7 +1045,7 @@ void fpsimd_flush_thread(void) + +@@ -1567,7 +1577,7 @@ void fpsimd_flush_thread(void) if (system_supports_sve()) { clear_thread_flag(TIF_SVE); - sve_free(current); + mem = sve_free_atomic(current); + fpsimd_flush_thread_vl(ARM64_VEC_SVE); + } - /* - * Reset the task vector length as required. -@@ -1068,6 +1079,7 @@ void fpsimd_flush_thread(void) +@@ -1579,6 +1589,7 @@ void fpsimd_flush_thread(void) } put_cpu_fpsimd_context(); -+ kfree(mem); ++ kfree(mem); } /* @@ -1805,10 +1805,10 @@ index 9a8f7c256..c0753dcdb 100644 static DEFINE_PER_CPU(call_single_data_t, cpu_backtrace_csd) = diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c -index 17cb54d1e..7f4a03453 100644 +index 7437291ff..d84d24413 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c -@@ -694,7 +694,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, +@@ -861,7 +861,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) { do { @@ -1818,10 +1818,10 @@ index 17cb54d1e..7f4a03453 100644 local_daif_restore(DAIF_PROCCTX_NOIRQ); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c -index bc5a91d17..b757d28ab 100644 +index 718f6060b..98b4249a2 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c -@@ -855,7 +855,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +@@ -863,7 +863,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) * involves poking the GIC, which must be done in a * non-preemptible context. 
*/ @@ -1830,7 +1830,7 @@ index bc5a91d17..b757d28ab 100644 kvm_pmu_flush_hwstate(vcpu); -@@ -879,7 +879,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +@@ -887,7 +887,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_timer_sync_user(vcpu); kvm_vgic_sync_hwstate(vcpu); local_irq_enable(); @@ -1839,7 +1839,7 @@ index bc5a91d17..b757d28ab 100644 continue; } -@@ -958,7 +958,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +@@ -966,7 +966,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) /* Exit types that need handling before we can be preempted */ handle_exit_early(vcpu, ret); @@ -2268,10 +2268,10 @@ index 4a0c30ced..498eaa4d3 100644 static void highmem_setup(void) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig -index 896a29df1..1b3593d53 100644 +index 0b87c1819..94b679ba0 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig -@@ -2727,6 +2727,7 @@ config WAR_MIPS34K_MISSED_ITLB +@@ -2728,6 +2728,7 @@ config WAR_MIPS34K_MISSED_ITLB config HIGHMEM bool "High Memory Support" depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA @@ -3226,7 +3226,7 @@ index 310bcd768..ae3212dcf 100644 } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c -index 1d20f0f77..7e0a497a3 100644 +index ba9b54d35..588e081ca 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -312,12 +312,11 @@ static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct, @@ -3908,10 +3908,10 @@ index 3348e0c4d..0db6919af 100644 (VMALLOC_END - VMALLOC_START) >> 20, diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig -index 530b7ec5d..a38d00d8b 100644 +index 7e2ce4c8d..1a6e4b187 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig -@@ -139,6 +139,7 @@ config MMU +@@ -140,6 +140,7 @@ config MMU config HIGHMEM bool default y if SPARC32 @@ -4257,7 +4257,7 @@ index e4abac6c9..173999422 100644 static struct kmsg_dumper kmsg_dumper = { diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index 236b510b2..403764738 100644 +index 04046abd1..4f14d3979 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -16,6 +16,7 @@ config X86_32 @@ -4268,7 +4268,7 @@ index 236b510b2..403764738 100644 select MODULES_USE_ELF_REL select OLD_SIGACTION select GENERIC_VDSO_32 -@@ -95,6 +96,7 @@ config X86 +@@ -96,6 +97,7 @@ config X86 select ARCH_SUPPORTS_ACPI select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_NUMA_BALANCING if X86_64 @@ -4276,7 +4276,7 @@ index 236b510b2..403764738 100644 select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS -@@ -219,6 +221,7 @@ config X86 +@@ -220,6 +222,7 @@ config X86 select HAVE_PCI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP @@ -4603,7 +4603,7 @@ index 77217bd29..8eba66a33 100644 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte); diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h -index 1b37f1d3a..2eb9b9f94 100644 +index e34d4b508..4b049abe2 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -30,6 +30,7 @@ extern void kernel_fpu_begin_mask(unsigned int kfpu_mask); @@ -5048,10 +5048,10 @@ index 440eed558..7cfc4e6b7 100644 } +#endif diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 5afb7bfda..b60a41928 100644 +index dc098fe48..75c1c28e9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c -@@ -8347,6 +8347,14 @@ int kvm_arch_init(void *opaque) +@@ -8392,6 +8392,14 @@ int kvm_arch_init(void *opaque) goto out; } @@ -5401,10 +5401,10 @@ index 673196fe8..0735ca5e8 100644 
kmap_waitqueues_init(); } diff --git a/block/blk-mq.c b/block/blk-mq.c -index 1eab99166..dea9f38b4 100644 +index 9ec937a6a..701c78a50 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c -@@ -44,7 +44,7 @@ +@@ -47,7 +47,7 @@ bool mq_unfair_dtag = true; module_param_named(unfair_dtag, mq_unfair_dtag, bool, 0444); @@ -5413,7 +5413,7 @@ index 1eab99166..dea9f38b4 100644 static void blk_mq_poll_stats_start(struct request_queue *q); static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb); -@@ -650,80 +650,29 @@ void blk_mq_end_request(struct request *rq, blk_status_t error) +@@ -654,80 +654,29 @@ void blk_mq_end_request(struct request *rq, blk_status_t error) } EXPORT_SYMBOL(blk_mq_end_request); @@ -5502,7 +5502,7 @@ index 1eab99166..dea9f38b4 100644 } static inline bool blk_mq_complete_need_ipi(struct request *rq) -@@ -733,6 +682,14 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq) +@@ -737,6 +686,14 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq) if (!IS_ENABLED(CONFIG_SMP) || !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) return false; @@ -5517,7 +5517,7 @@ index 1eab99166..dea9f38b4 100644 /* same CPU or cache domain? Complete locally */ if (cpu == rq->mq_ctx->cpu || -@@ -744,6 +701,31 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq) +@@ -748,6 +705,31 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq) return cpu_online(rq->mq_ctx->cpu); } @@ -5549,7 +5549,7 @@ index 1eab99166..dea9f38b4 100644 bool blk_mq_complete_request_remote(struct request *rq) { WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); -@@ -756,15 +738,15 @@ bool blk_mq_complete_request_remote(struct request *rq) +@@ -760,15 +742,15 @@ bool blk_mq_complete_request_remote(struct request *rq) return false; if (blk_mq_complete_need_ipi(rq)) { @@ -5572,7 +5572,7 @@ index 1eab99166..dea9f38b4 100644 } EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote); -@@ -1679,14 +1661,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, +@@ -1685,14 +1667,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, return; if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { @@ -5590,7 +5590,7 @@ index 1eab99166..dea9f38b4 100644 } /* -@@ -4215,7 +4197,7 @@ static int __init blk_mq_init(void) +@@ -4245,7 +4227,7 @@ static int __init blk_mq_init(void) int i; for_each_possible_cpu(i) @@ -5754,7 +5754,7 @@ index a9e7f5a82..30b4c288c 100644 static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space, u8 *buf, size_t bufsiz) diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c -index 14fad16d3..f9a4ac20f 100644 +index 3e1bb28b7..c26ed0ce6 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -50,6 +50,31 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da @@ -5789,7 +5789,7 @@ index 14fad16d3..f9a4ac20f 100644 static int interrupts = -1; module_param(interrupts, int, 0444); MODULE_PARM_DESC(interrupts, "Enable interrupts"); -@@ -170,7 +195,7 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, +@@ -186,7 +211,7 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); while (len--) @@ -5798,7 +5798,7 @@ index 14fad16d3..f9a4ac20f 100644 return 0; } -@@ -197,7 +222,7 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value) +@@ -213,7 +238,7 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value) 
{ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); @@ -6237,10 +6237,10 @@ index 60ab7151b..93f92ccd4 100644 return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c -index 5cea6eea7..785023081 100644 +index 9a02c4871..758c484c7 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c -@@ -89,11 +89,11 @@ apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) +@@ -88,11 +88,11 @@ apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) { void *reloc_page; @@ -6254,7 +6254,7 @@ index 5cea6eea7..785023081 100644 } static void -@@ -105,9 +105,9 @@ apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) +@@ -104,9 +104,9 @@ apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) if (info->src_bo && !info->src_bo->is_primary) id = info->src_bo->surface_id; @@ -6266,7 +6266,7 @@ index 5cea6eea7..785023081 100644 } /* return holding the reference to this object */ -@@ -149,7 +149,6 @@ static int qxl_process_single_command(struct qxl_device *qdev, +@@ -148,7 +148,6 @@ static int qxl_process_single_command(struct qxl_device *qdev, struct qxl_bo *cmd_bo; void *fb_cmd; int i, ret, num_relocs; @@ -6274,7 +6274,7 @@ index 5cea6eea7..785023081 100644 switch (cmd->type) { case QXL_CMD_DRAW: -@@ -185,21 +184,21 @@ static int qxl_process_single_command(struct qxl_device *qdev, +@@ -184,21 +183,21 @@ static int qxl_process_single_command(struct qxl_device *qdev, goto out_free_reloc; /* TODO copy slow path code from i915 */ @@ -6574,6 +6574,19 @@ index e99400f3a..396ec97f1 100644 &bytes_written); if (bytes_written) hyperv_report_panic_msg(panic_pa, bytes_written); +diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c +index 1b3d7b43d..a5c0d38a3 100644 +--- a/drivers/irqchip/irq-gic-v4.c ++++ b/drivers/irqchip/irq-gic-v4.c +@@ -86,7 +86,7 @@ + static struct irq_domain *gic_domain; + static const struct irq_domain_ops *vpe_domain_ops; + static const struct irq_domain_ops *sgi_domain_ops; +-static bool vtimer_irqbypass; ++bool vtimer_irqbypass; + + static bool has_v4_1(void) + { diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig index ce9429ca6..29ccbd6ac 100644 --- a/drivers/leds/trigger/Kconfig @@ -6587,7 +6600,7 @@ index ce9429ca6..29ccbd6ac 100644 This allows LEDs to be controlled by active CPUs. This shows the active CPUs across an array of LEDs so you can see which diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c -index 5af25898b..c40e960cd 100644 +index b4d004b62..8ba4c4804 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2218,8 +2218,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) @@ -7106,10 +7119,10 @@ index 71e2ada86..72e2e71aa 100644 /* Find first taken slot. */ for (slot = 0; slot < ATH_BCBUF; slot++) { diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c -index 4353443b8..03e2569da 100644 +index 2d6c77dcc..7daba964f 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c -@@ -1522,7 +1522,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +@@ -1518,7 +1518,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) * Prevents hv_pci_onchannelcallback() from running concurrently * in the tasklet. 
*/ @@ -7216,10 +7229,10 @@ index 4261380af..65160eaaa 100644 /* peek cache of free slot */ if (pool->left != FC_XID_UNKNOWN) { diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h -index b6dc9003b..d5cf70ead 100644 +index 61b11490a..32c534b87 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h -@@ -153,12 +153,55 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value) +@@ -152,12 +152,55 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value) up->dl_write(up, value); } @@ -7276,7 +7289,7 @@ index b6dc9003b..d5cf70ead 100644 return true; } -@@ -167,7 +210,7 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up) +@@ -166,7 +209,7 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up) if (!(up->ier & UART_IER_THRI)) return false; up->ier &= ~UART_IER_THRI; @@ -7286,7 +7299,7 @@ index b6dc9003b..d5cf70ead 100644 } diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c -index 0a7e9491b..83536b159 100644 +index 43f2eed6d..687119fe2 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -275,10 +275,8 @@ static void serial8250_backup_timeout(struct timer_list *t) @@ -7428,10 +7441,10 @@ index de48a5846..d246f2755 100644 static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c -index 1f231fcda..0901c5bae 100644 +index 432a43892..1943f60c6 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c -@@ -729,7 +729,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) +@@ -730,7 +730,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) serial_out(p, UART_EFR, UART_EFR_ECB); serial_out(p, UART_LCR, 0); } @@ -7440,7 +7453,7 @@ index 1f231fcda..0901c5bae 100644 if (p->capabilities & UART_CAP_EFR) { serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B); serial_out(p, UART_EFR, efr); -@@ -1404,7 +1404,7 @@ static void serial8250_stop_rx(struct uart_port *port) +@@ -1405,7 +1405,7 @@ static void serial8250_stop_rx(struct uart_port *port) up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); up->port.read_status_mask &= ~UART_LSR_DR; @@ -7449,7 +7462,7 @@ index 1f231fcda..0901c5bae 100644 serial8250_rpm_put(up); } -@@ -1434,7 +1434,7 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p) +@@ -1435,7 +1435,7 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p) serial8250_clear_and_reinit_fifos(p); p->ier |= UART_IER_RLSI | UART_IER_RDI; @@ -7458,7 +7471,7 @@ index 1f231fcda..0901c5bae 100644 } } EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx); -@@ -1676,7 +1676,7 @@ static void serial8250_disable_ms(struct uart_port *port) +@@ -1677,7 +1677,7 @@ static void serial8250_disable_ms(struct uart_port *port) mctrl_gpio_disable_ms(up->gpios); up->ier &= ~UART_IER_MSI; @@ -7467,7 +7480,7 @@ index 1f231fcda..0901c5bae 100644 } static void serial8250_enable_ms(struct uart_port *port) -@@ -1692,7 +1692,7 @@ static void serial8250_enable_ms(struct uart_port *port) +@@ -1693,7 +1693,7 @@ static void serial8250_enable_ms(struct uart_port *port) up->ier |= UART_IER_MSI; serial8250_rpm_get(up); @@ -7476,7 +7489,7 @@ index 1f231fcda..0901c5bae 100644 serial8250_rpm_put(up); } -@@ -2116,14 +2116,7 @@ static void serial8250_put_poll_char(struct uart_port *port, +@@ -2126,14 +2126,7 @@ static void serial8250_put_poll_char(struct uart_port *port, struct uart_8250_port *up = 
up_to_u8250p(port); serial8250_rpm_get(up); @@ -7492,7 +7505,7 @@ index 1f231fcda..0901c5bae 100644 wait_for_xmitr(up, BOTH_EMPTY); /* -@@ -2136,7 +2129,7 @@ static void serial8250_put_poll_char(struct uart_port *port, +@@ -2146,7 +2139,7 @@ static void serial8250_put_poll_char(struct uart_port *port, * and restore the IER */ wait_for_xmitr(up, BOTH_EMPTY); @@ -7501,7 +7514,7 @@ index 1f231fcda..0901c5bae 100644 serial8250_rpm_put(up); } -@@ -2441,7 +2434,7 @@ void serial8250_do_shutdown(struct uart_port *port) +@@ -2451,7 +2444,7 @@ void serial8250_do_shutdown(struct uart_port *port) */ spin_lock_irqsave(&port->lock, flags); up->ier = 0; @@ -7510,7 +7523,7 @@ index 1f231fcda..0901c5bae 100644 spin_unlock_irqrestore(&port->lock, flags); synchronize_irq(port->irq); -@@ -2797,7 +2790,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, +@@ -2803,7 +2796,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, if (up->capabilities & UART_CAP_RTOIE) up->ier |= UART_IER_RTOIE; @@ -7519,7 +7532,7 @@ index 1f231fcda..0901c5bae 100644 if (up->capabilities & UART_CAP_EFR) { unsigned char efr = 0; -@@ -3262,7 +3255,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults); +@@ -3267,7 +3260,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults); #ifdef CONFIG_SERIAL_8250_CONSOLE @@ -7528,7 +7541,7 @@ index 1f231fcda..0901c5bae 100644 { struct uart_8250_port *up = up_to_u8250p(port); -@@ -3270,6 +3263,18 @@ static void serial8250_console_putchar(struct uart_port *port, int ch) +@@ -3275,6 +3268,18 @@ static void serial8250_console_putchar(struct uart_port *port, int ch) serial_port_out(port, UART_TX, ch); } @@ -7547,7 +7560,7 @@ index 1f231fcda..0901c5bae 100644 /* * Restore serial console when h/w power-off detected */ -@@ -3296,6 +3301,32 @@ static void serial8250_console_restore(struct uart_8250_port *up) +@@ -3301,6 +3306,32 @@ static void serial8250_console_restore(struct uart_8250_port *up) serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS); } @@ -7580,7 +7593,7 @@ index 1f231fcda..0901c5bae 100644 /* * Print a string to the serial port trying not to disturb * any possible real use of the port... 
-@@ -3312,24 +3343,12 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, +@@ -3317,24 +3348,12 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, struct uart_port *port = &up->port; unsigned long flags; unsigned int ier; @@ -7607,7 +7620,7 @@ index 1f231fcda..0901c5bae 100644 /* check scratch reg to see if port powered off during system sleep */ if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) { -@@ -3343,7 +3362,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, +@@ -3348,7 +3367,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, mdelay(port->rs485.delay_rts_before_send); } @@ -7617,7 +7630,7 @@ index 1f231fcda..0901c5bae 100644 /* * Finally, wait for transmitter to become empty -@@ -3356,8 +3377,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, +@@ -3361,8 +3382,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, if (em485->tx_stopped) up->rs485_stop_tx(up); } @@ -7627,7 +7640,7 @@ index 1f231fcda..0901c5bae 100644 /* * The receive handling will happen properly because the -@@ -3369,8 +3389,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, +@@ -3374,8 +3394,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, if (up->msr_saved_flags) serial8250_modem_status(up); @@ -7637,7 +7650,7 @@ index 1f231fcda..0901c5bae 100644 } static unsigned int probe_baud(struct uart_port *port) -@@ -3390,6 +3409,7 @@ static unsigned int probe_baud(struct uart_port *port) +@@ -3395,6 +3414,7 @@ static unsigned int probe_baud(struct uart_port *port) int serial8250_console_setup(struct uart_port *port, char *options, bool probe) { @@ -7645,7 +7658,7 @@ index 1f231fcda..0901c5bae 100644 int baud = 9600; int bits = 8; int parity = 'n'; -@@ -3399,6 +3419,8 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe) +@@ -3404,6 +3424,8 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe) if (!port->iobase && !port->membase) return -ENODEV; @@ -7729,7 +7742,7 @@ index 84e815808..342005ed5 100644 static int __init diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c -index c3abcd043..2479ea4c8 100644 +index 89e3e220f..30e549b5e 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -172,10 +172,10 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size) @@ -8032,7 +8045,7 @@ index d5294e663..ee8846818 100644 if (!o->nodeid) { /* diff --git a/fs/inode.c b/fs/inode.c -index 7436a17a2..45a821a8c 100644 +index fd1b9fddf..0d4adddba 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -158,7 +158,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) @@ -8045,7 +8058,7 @@ index 7436a17a2..45a821a8c 100644 inode->dirtied_when = 0; diff --git a/fs/namei.c b/fs/namei.c -index f08e14d6d..14d27fe95 100644 +index 3588e12d6..7a171441c 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1538,7 +1538,7 @@ static struct dentry *__lookup_slow(const struct qstr *name, @@ -8057,7 +8070,7 @@ index f08e14d6d..14d27fe95 100644 /* Don't go there if it's already dead */ if (unlikely(IS_DEADDIR(inode))) -@@ -3035,7 +3035,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file, +@@ -3092,7 +3092,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file, struct dentry *dentry; int error, create_error = 0; umode_t mode = op->mode; @@ -8137,7 +8150,7 @@ index b27ebdcce..f86c98a7e 
100644 status = -EBUSY; spin_lock(&dentry->d_lock); diff --git a/fs/proc/array.c b/fs/proc/array.c -index 18a4588c3..decaa7768 100644 +index 989f76020..d638d2c26 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -384,9 +384,9 @@ static inline void task_context_switch_counts(struct seq_file *m, @@ -8145,26 +8158,27 @@ index 18a4588c3..decaa7768 100644 { seq_printf(m, "Cpus_allowed:\t%*pb\n", - cpumask_pr_args(task->cpus_ptr)); -+ cpumask_pr_args(&task->cpus_mask)); ++ cpumask_pr_args(&task->cpus_mask)); seq_printf(m, "Cpus_allowed_list:\t%*pbl\n", - cpumask_pr_args(task->cpus_ptr)); + cpumask_pr_args(&task->cpus_mask)); } - static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm) + #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY diff --git a/fs/proc/base.c b/fs/proc/base.c -index 24c70ff92..6c8156c4c 100644 +index 1516ff281..fe38291e7 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c -@@ -97,6 +97,7 @@ - #include - #include - #include -+#include - #include +@@ -106,7 +106,7 @@ #include #include "internal.h" -@@ -2164,7 +2165,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, + #include "fd.h" +- ++#include + #include "../../lib/kstrtox.h" + + /* NOTE: +@@ -2265,7 +2265,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, child = d_hash_and_lookup(dir, &qname); if (!child) { @@ -8174,7 +8188,7 @@ index 24c70ff92..6c8156c4c 100644 if (IS_ERR(child)) goto end_instantiate; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c -index df435cd91..eb19a3429 100644 +index b7629943b..d69b06862 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -684,7 +684,7 @@ static bool proc_sys_fill_cache(struct file *file, @@ -8514,7 +8528,7 @@ index 6cd2a92da..ab602b95d 100644 #endif diff --git a/include/linux/fs.h b/include/linux/fs.h -index f6bb20f6f..e7d79fdf4 100644 +index f66f51edd..441742081 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -724,7 +724,7 @@ struct inode { @@ -8804,7 +8818,7 @@ index 000000000..f9bc6acd3 + +#endif diff --git a/include/linux/highmem.h b/include/linux/highmem.h -index cc5fe6c62..77be3e318 100644 +index ebfee2b67..5ebb6facc 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -11,217 +11,137 @@ @@ -8878,19 +8892,19 @@ index cc5fe6c62..77be3e318 100644 + * @page: Pointer to the page to be mapped + * + * Returns: The virtual address of the mapping ++ * ++ * Can only be invoked from preemptible task context because on 32bit ++ * systems with CONFIG_HIGHMEM enabled this function might sleep. * - * However when holding an atomic kmap it is not legal to sleep, so atomic - * kmaps are appropriate for short, tight code paths only. -+ * Can only be invoked from preemptible task context because on 32bit -+ * systems with CONFIG_HIGHMEM enabled this function might sleep. ++ * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area ++ * this returns the virtual address of the direct kernel mapping. * - * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap - * gives a more generic (and caching) interface. But kmap_atomic can - * be used in IRQ contexts, so in some (very limited) cases we need - * it. -+ * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area -+ * this returns the virtual address of the direct kernel mapping. -+ * + * The returned virtual address is globally visible and valid up to the + * point where it is unmapped via kunmap(). The pointer can be handed to + * other contexts. 
@@ -9072,13 +9086,13 @@ index cc5fe6c62..77be3e318 100644 - kunmap_flush_on_unmap(addr); #endif -} -- + -#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) - -#define kmap_flush_unused() do {} while(0) - -#endif /* CONFIG_HIGHMEM */ - +- -#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) - -DECLARE_PER_CPU(int, __kmap_atomic_idx); @@ -9342,7 +9356,7 @@ index ec2a47a81..9448e2bfc 100644 bool irq_work_queue_on(struct irq_work *work, int cpu); diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h -index dc1b213ae..9bbcd8cba 100644 +index 8710502b3..db4c4d3f8 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -68,6 +68,7 @@ struct irq_desc { @@ -9395,10 +9409,10 @@ index fef2d43a7..741aa2008 100644 defined(CONFIG_PREEMPT_TRACER) extern void stop_critical_timings(void); diff --git a/include/linux/kernel.h b/include/linux/kernel.h -index e1d66cc50..727913a6f 100644 +index 2023afa63..42ce73380 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h -@@ -220,6 +220,7 @@ static __always_inline void might_resched(void) +@@ -223,6 +223,7 @@ static __always_inline void might_resched(void) extern void ___might_sleep(const char *file, int line, int preempt_offset); extern void __might_sleep(const char *file, int line, int preempt_offset); extern void __cant_sleep(const char *file, int line, int preempt_offset); @@ -9406,7 +9420,7 @@ index e1d66cc50..727913a6f 100644 /** * might_sleep - annotation for functions that can sleep -@@ -235,6 +236,10 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); +@@ -238,6 +239,10 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); */ # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) @@ -9417,7 +9431,7 @@ index e1d66cc50..727913a6f 100644 /** * cant_sleep - annotation for functions that cannot sleep * -@@ -243,6 +248,18 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); +@@ -246,6 +251,18 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); # define cant_sleep() \ do { __cant_sleep(__FILE__, __LINE__, 0); } while (0) # define sched_annotate_sleep() (current->task_state_change = 0) @@ -9436,7 +9450,7 @@ index e1d66cc50..727913a6f 100644 /** * non_block_start - annotate the start of section where sleeping is prohibited * -@@ -266,7 +283,9 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); +@@ -269,7 +286,9 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); static inline void __might_sleep(const char *file, int line, int preempt_offset) { } # define might_sleep() do { might_resched(); } while (0) @@ -9446,7 +9460,7 @@ index e1d66cc50..727913a6f 100644 # define sched_annotate_sleep() do { } while (0) # define non_block_start() do { } while (0) # define non_block_end() do { } while (0) -@@ -274,13 +293,6 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); +@@ -277,13 +296,6 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) @@ -9718,7 +9732,7 @@ index 3f02b8186..1b8ae0349 100644 + +#endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index d1be389c0..54b482514 100644 +index 940b19d4a..f4f782e98 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -13,6 +13,7 @@ @@ -9940,7 +9954,7 @@ index 000000000..f0b2e07cd + +#endif diff --git a/include/linux/nfs_xdr.h 
b/include/linux/nfs_xdr.h -index 5491ad5f4..cd9e5b3f1 100644 +index 33442fd01..4612bb5be 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1675,7 +1675,7 @@ struct nfs_unlinkdata { @@ -10286,7 +10300,7 @@ index 89d5281e0..0386dd2ab 100644 #endif /* __LINUX_PREEMPT_H */ diff --git a/include/linux/printk.h b/include/linux/printk.h -index 7d787f91d..9331b131b 100644 +index 9bd33432d..26c70fd11 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -46,6 +46,12 @@ static inline const char *printk_skip_headers(const char *buffer) @@ -10439,7 +10453,7 @@ index 000000000..77a89dd2c + +#endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h -index 095b3b39b..1effcae06 100644 +index ef8d56b18..cbeea5c61 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -54,6 +54,11 @@ void __rcu_read_unlock(void); @@ -10463,7 +10477,7 @@ index 095b3b39b..1effcae06 100644 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ /* Internal to kernel */ -@@ -329,7 +336,8 @@ static inline void rcu_preempt_sleep_check(void) { } +@@ -338,7 +345,8 @@ static inline void rcu_preempt_sleep_check(void) { } #define rcu_sleep_check() \ do { \ rcu_preempt_sleep_check(); \ @@ -10877,7 +10891,7 @@ index 4c715be48..9323af8a9 100644 * lock for reading */ diff --git a/include/linux/sched.h b/include/linux/sched.h -index 5e413d309..d3329dc9a 100644 +index 8ccbca99a..c7e9afdd7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -36,6 +36,7 @@ @@ -10921,7 +10935,7 @@ index 5e413d309..d3329dc9a 100644 /* * set_special_state() should be used for those states when the blocking task * can not use the regular condition based wait-loop. In that case we must -@@ -675,6 +678,13 @@ struct wake_q_node { +@@ -679,6 +682,13 @@ struct wake_q_node { struct wake_q_node *next; }; @@ -10935,7 +10949,7 @@ index 5e413d309..d3329dc9a 100644 /** * struct task_struct_resvd - KABI extension struct */ -@@ -700,6 +710,8 @@ struct task_struct { +@@ -704,6 +714,8 @@ struct task_struct { #endif /* -1 unrunnable, 0 runnable, >0 stopped: */ volatile long state; @@ -10944,7 +10958,7 @@ index 5e413d309..d3329dc9a 100644 /* * This begins the randomizable portion of task_struct. Only -@@ -772,6 +784,11 @@ struct task_struct { +@@ -776,6 +788,11 @@ struct task_struct { int nr_cpus_allowed; const cpumask_t *cpus_ptr; cpumask_t cpus_mask; @@ -10956,7 +10970,7 @@ index 5e413d309..d3329dc9a 100644 #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; -@@ -880,6 +897,10 @@ struct task_struct { +@@ -884,6 +901,10 @@ struct task_struct { #ifdef CONFIG_IOMMU_SVA KABI_FILL_HOLE(unsigned pasid_activated:1) #endif @@ -10967,7 +10981,7 @@ index 5e413d309..d3329dc9a 100644 unsigned long atomic_flags; /* Flags requiring atomic access. 
*/ -@@ -1021,11 +1042,16 @@ struct task_struct { +@@ -1025,11 +1046,16 @@ struct task_struct { /* Signal handlers: */ struct signal_struct *signal; struct sighand_struct __rcu *sighand; @@ -10984,7 +10998,7 @@ index 5e413d309..d3329dc9a 100644 unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; -@@ -1052,6 +1078,7 @@ struct task_struct { +@@ -1056,6 +1082,7 @@ struct task_struct { raw_spinlock_t pi_lock; struct wake_q_node wake_q; @@ -10992,7 +11006,7 @@ index 5e413d309..d3329dc9a 100644 #ifdef CONFIG_RT_MUTEXES /* PI waiters blocked on a rt_mutex held by this task: */ -@@ -1079,6 +1106,9 @@ struct task_struct { +@@ -1083,6 +1110,9 @@ struct task_struct { int softirq_context; int irq_config; #endif @@ -11002,7 +11016,7 @@ index 5e413d309..d3329dc9a 100644 #ifdef CONFIG_LOCKDEP # define MAX_LOCK_DEPTH 48UL -@@ -1364,6 +1394,7 @@ struct task_struct { +@@ -1368,6 +1398,7 @@ struct task_struct { unsigned int sequential_io; unsigned int sequential_io_avg; #endif @@ -11010,7 +11024,7 @@ index 5e413d309..d3329dc9a 100644 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; #endif -@@ -1851,6 +1882,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr); +@@ -1868,6 +1899,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); @@ -11018,7 +11032,7 @@ index 5e413d309..d3329dc9a 100644 extern void wake_up_new_task(struct task_struct *tsk); #ifdef CONFIG_SMP -@@ -1952,6 +1984,89 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) +@@ -1969,6 +2001,89 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } @@ -11108,6 +11122,18 @@ index 5e413d309..d3329dc9a 100644 /* * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. The return +@@ -2059,11 +2174,7 @@ static inline int spin_needbreak(spinlock_t *lock) + */ + static inline int rwlock_needbreak(rwlock_t *lock) + { +-#ifdef CONFIG_PREEMPTION +- return rwlock_is_contended(lock); +-#else + return 0; +-#endif + } + + static __always_inline bool need_resched(void) diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h index 9a62ffdd2..412cdaba3 100644 --- a/include/linux/sched/hotplug.h @@ -11194,7 +11220,7 @@ index 26a2013ac..6e2dff721 100644 #endif /* _LINUX_SCHED_WAKE_Q_H */ diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h -index 0180b3d06..68a2debc1 100644 +index de6322f91..35589e8c5 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -7,6 +7,7 @@ @@ -11205,7 +11231,7 @@ index 0180b3d06..68a2debc1 100644 #include #include #include -@@ -126,6 +127,8 @@ struct uart_8250_port { +@@ -125,6 +126,8 @@ struct uart_8250_port { #define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA unsigned char msr_saved_flags; @@ -11214,7 +11240,7 @@ index 0180b3d06..68a2debc1 100644 struct uart_8250_dma *dma; const struct uart_8250_ops *ops; -@@ -181,6 +184,8 @@ void serial8250_init_port(struct uart_8250_port *up); +@@ -180,6 +183,8 @@ void serial8250_init_port(struct uart_8250_port *up); void serial8250_set_defaults(struct uart_8250_port *up); void serial8250_console_write(struct uart_8250_port *up, const char *s, unsigned int count); @@ -11249,10 +11275,10 @@ index 3038a0610..fff1656c6 100644 /* Test if 'sig' is valid signal. 
Use this instead of testing _NSIG directly */ static inline int valid_signal(unsigned long sig) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h -index d16c8bd08..d7248f71d 100644 +index 766f7ca57..93a7cef9b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h -@@ -298,6 +298,7 @@ struct sk_buff_head { +@@ -299,6 +299,7 @@ struct sk_buff_head { __u32 qlen; spinlock_t lock; @@ -11260,7 +11286,7 @@ index d16c8bd08..d7248f71d 100644 }; struct sk_buff; -@@ -1929,6 +1930,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) +@@ -1930,6 +1931,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) __skb_queue_head_init(list); } @@ -11274,7 +11300,7 @@ index d16c8bd08..d7248f71d 100644 struct lock_class_key *class) { diff --git a/include/linux/smp.h b/include/linux/smp.h -index 84a0b4828..8348fa412 100644 +index 812c26f61..dcf0b5d50 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -260,6 +260,9 @@ static inline int get_boot_cpu_id(void) @@ -12351,10 +12377,10 @@ index eb5ec1fb6..122d96db9 100644 /* This part must be outside protection */ diff --git a/init/Kconfig b/init/Kconfig -index 31fff350a..7cc9ec6ef 100644 +index 69bd400da..c3f511a69 100644 --- a/init/Kconfig +++ b/init/Kconfig -@@ -866,7 +866,7 @@ config NUMA_BALANCING +@@ -899,7 +899,7 @@ config NUMA_BALANCING bool "Memory placement aware NUMA scheduler" depends on ARCH_SUPPORTS_NUMA_BALANCING depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY @@ -12363,7 +12389,7 @@ index 31fff350a..7cc9ec6ef 100644 help This option adds support for automatic NUMA aware memory/task placement. The mechanism is quite primitive and is based on migrating memory when -@@ -1023,6 +1023,7 @@ config CFS_BANDWIDTH +@@ -1056,6 +1056,7 @@ config CFS_BANDWIDTH config RT_GROUP_SCHED bool "Group scheduling for SCHED_RR/FIFO" depends on CGROUP_SCHED @@ -12371,7 +12397,7 @@ index 31fff350a..7cc9ec6ef 100644 default n help This feature lets you explicitly allocate real CPU bandwidth -@@ -2005,6 +2006,7 @@ choice +@@ -2051,6 +2052,7 @@ choice config SLAB bool "SLAB" @@ -12379,7 +12405,7 @@ index 31fff350a..7cc9ec6ef 100644 select HAVE_HARDENED_USERCOPY_ALLOCATOR help The regular slab allocator that is established and known to work -@@ -2025,6 +2027,7 @@ config SLUB +@@ -2071,6 +2073,7 @@ config SLUB config SLOB depends on EXPERT bool "SLOB (Simple Allocator)" @@ -12387,7 +12413,7 @@ index 31fff350a..7cc9ec6ef 100644 help SLOB replaces the stock allocator with a drastically simpler allocator. SLOB is generally more space efficient but -@@ -2091,7 +2094,7 @@ config SHUFFLE_PAGE_ALLOCATOR +@@ -2137,7 +2140,7 @@ config SHUFFLE_PAGE_ALLOCATOR config SLUB_CPU_PARTIAL default y @@ -12434,7 +12460,7 @@ index e62a62303..b95f8784c 100644 This option turns the kernel into a real-time kernel by replacing various locking primitives (spinlocks, rwlocks, etc.) 
with diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c -index 90de01cc6..d3b1a03d8 100644 +index 1dc340ac6..ff55b611b 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -358,7 +358,7 @@ void cpuset_read_unlock(void) @@ -12560,7 +12586,7 @@ index 90de01cc6..d3b1a03d8 100644 } free_cpumasks(NULL, &tmpmask); -@@ -2575,7 +2575,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) +@@ -2579,7 +2579,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) cpuset_filetype_t type = seq_cft(sf)->private; int ret = 0; @@ -12569,7 +12595,7 @@ index 90de01cc6..d3b1a03d8 100644 switch (type) { case FILE_CPULIST: -@@ -2602,7 +2602,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) +@@ -2606,7 +2606,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) ret = -EINVAL; } @@ -12578,7 +12604,7 @@ index 90de01cc6..d3b1a03d8 100644 return ret; } -@@ -2923,14 +2923,14 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) +@@ -2927,14 +2927,14 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) cpuset_inc(); @@ -12595,7 +12621,7 @@ index 90de01cc6..d3b1a03d8 100644 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) goto out_unlock; -@@ -2957,7 +2957,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) +@@ -2961,7 +2961,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) } rcu_read_unlock(); @@ -12604,7 +12630,7 @@ index 90de01cc6..d3b1a03d8 100644 cs->mems_allowed = parent->mems_allowed; cs->effective_mems = parent->mems_allowed; cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); -@@ -2965,7 +2965,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) +@@ -2969,7 +2969,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY cpumask_copy(cs->prefer_cpus, parent->prefer_cpus); #endif @@ -12613,7 +12639,7 @@ index 90de01cc6..d3b1a03d8 100644 out_unlock: percpu_up_write(&cpuset_rwsem); put_online_cpus(); -@@ -3021,7 +3021,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) +@@ -3025,7 +3025,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) static void cpuset_bind(struct cgroup_subsys_state *root_css) { percpu_down_write(&cpuset_rwsem); @@ -12622,7 +12648,7 @@ index 90de01cc6..d3b1a03d8 100644 if (is_in_v2_mode()) { cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); -@@ -3032,7 +3032,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css) +@@ -3036,7 +3036,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css) top_cpuset.mems_allowed = top_cpuset.effective_mems; } @@ -12631,7 +12657,7 @@ index 90de01cc6..d3b1a03d8 100644 percpu_up_write(&cpuset_rwsem); } -@@ -3144,12 +3144,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs, +@@ -3148,12 +3148,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs, #endif bool is_empty; @@ -12646,7 +12672,7 @@ index 90de01cc6..d3b1a03d8 100644 /* * Don't call update_tasks_cpumask() if the cpuset becomes empty, -@@ -3193,10 +3193,10 @@ hotplug_update_tasks(struct cpuset *cs, +@@ -3197,10 +3197,10 @@ hotplug_update_tasks(struct cpuset *cs, if (nodes_empty(*new_mems)) *new_mems = parent_cs(cs)->effective_mems; @@ -12659,7 +12685,7 @@ index 90de01cc6..d3b1a03d8 100644 if (cpus_updated) update_tasks_cpumask(cs); -@@ -3263,10 +3263,10 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) +@@ -3267,10 +3267,10 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks 
*tmp) if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || (parent->partition_root_state == PRS_ERROR))) { if (cs->nr_subparts_cpus) { @@ -12672,7 +12698,7 @@ index 90de01cc6..d3b1a03d8 100644 compute_effective_cpumask(&new_cpus, cs, parent); } -@@ -3280,9 +3280,9 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) +@@ -3284,9 +3284,9 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) cpumask_empty(&new_cpus)) { update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp); @@ -12684,7 +12710,7 @@ index 90de01cc6..d3b1a03d8 100644 } cpuset_force_rebuild(); } -@@ -3362,7 +3362,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work) +@@ -3366,7 +3366,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work) /* synchronize cpus_allowed to cpu_active_mask */ if (cpus_updated) { @@ -12693,7 +12719,7 @@ index 90de01cc6..d3b1a03d8 100644 if (!on_dfl) cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); /* -@@ -3382,17 +3382,17 @@ static void cpuset_hotplug_workfn(struct work_struct *work) +@@ -3386,17 +3386,17 @@ static void cpuset_hotplug_workfn(struct work_struct *work) } } cpumask_copy(top_cpuset.effective_cpus, &new_cpus); @@ -12714,7 +12740,7 @@ index 90de01cc6..d3b1a03d8 100644 update_tasks_nodemask(&top_cpuset); } -@@ -3496,11 +3496,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) +@@ -3500,11 +3500,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) { unsigned long flags; @@ -12728,7 +12754,7 @@ index 90de01cc6..d3b1a03d8 100644 } /** -@@ -3561,11 +3561,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) +@@ -3565,11 +3565,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) nodemask_t mask; unsigned long flags; @@ -12742,7 +12768,7 @@ index 90de01cc6..d3b1a03d8 100644 return mask; } -@@ -3657,14 +3657,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask) +@@ -3661,14 +3661,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask) return true; /* Not hardwall and node outside mems_allowed: scan up cpusets */ @@ -12784,10 +12810,10 @@ index d2ae14d0b..7b3bea56d 100644 /* if @may_sleep, play nice and yield if necessary */ if (may_sleep && (need_resched() || diff --git a/kernel/cpu.c b/kernel/cpu.c -index 9eedba9ac..17f106ef2 100644 +index 89a8e7b9f..885b23d8a 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c -@@ -1687,7 +1687,7 @@ static struct cpuhp_step cpuhp_hp_states[] = { +@@ -1700,7 +1700,7 @@ static struct cpuhp_step cpuhp_hp_states[] = { .name = "ap:online", }, /* @@ -12796,7 +12822,7 @@ index 9eedba9ac..17f106ef2 100644 * this itself. 
*/ [CPUHP_TEARDOWN_CPU] = { -@@ -1696,6 +1696,13 @@ static struct cpuhp_step cpuhp_hp_states[] = { +@@ -1709,6 +1709,13 @@ static struct cpuhp_step cpuhp_hp_states[] = { .teardown.single = takedown_cpu, .cant_stop = true, }, @@ -12907,7 +12933,7 @@ index 26a81ea63..c15ca5450 100644 spin_unlock(&sighand->siglock); diff --git a/kernel/fork.c b/kernel/fork.c -index 2547c6a6e..fd07bda90 100644 +index 7e31806a7..1d5addd11 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -42,6 +42,7 @@ @@ -12918,7 +12944,7 @@ index 2547c6a6e..fd07bda90 100644 #include #include #include -@@ -291,7 +292,7 @@ static inline void free_thread_stack(struct task_struct *tsk) +@@ -293,7 +294,7 @@ static inline void free_thread_stack(struct task_struct *tsk) return; } @@ -12927,7 +12953,7 @@ index 2547c6a6e..fd07bda90 100644 return; } #endif -@@ -699,6 +700,19 @@ void __mmdrop(struct mm_struct *mm) +@@ -704,6 +705,19 @@ void __mmdrop(struct mm_struct *mm) } EXPORT_SYMBOL_GPL(__mmdrop); @@ -12947,7 +12973,7 @@ index 2547c6a6e..fd07bda90 100644 static void mmdrop_async_fn(struct work_struct *work) { struct mm_struct *mm; -@@ -740,6 +754,15 @@ void __put_task_struct(struct task_struct *tsk) +@@ -745,6 +759,15 @@ void __put_task_struct(struct task_struct *tsk) WARN_ON(refcount_read(&tsk->usage)); WARN_ON(tsk == current); @@ -12963,7 +12989,7 @@ index 2547c6a6e..fd07bda90 100644 io_uring_free(tsk); cgroup_free(tsk); task_numa_free(tsk, true); -@@ -961,11 +984,13 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) +@@ -966,11 +989,13 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; tsk->pf_io_worker = NULL; @@ -12977,7 +13003,7 @@ index 2547c6a6e..fd07bda90 100644 #ifdef CONFIG_FAULT_INJECTION tsk->fail_nth = 0; #endif -@@ -2102,6 +2127,7 @@ static __latent_entropy struct task_struct *copy_process( +@@ -2113,6 +2138,7 @@ static __latent_entropy struct task_struct *copy_process( spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); @@ -13177,7 +13203,7 @@ index 8806444a6..acbce92f9 100644 if (!noirqdebug) note_interrupt(desc, retval); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 239f5084b..bc59cb61f 100644 +index 227787723..c7bf98810 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1302,6 +1302,8 @@ static int irq_thread(void *data) @@ -13454,7 +13480,7 @@ index fbff25adb..d3466e3ba 100644 +} +early_initcall(irq_work_init_threads); diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c -index b9a6f4658..c26219f34 100644 +index ca1a46960..dade489ea 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -984,7 +984,6 @@ void crash_kexec(struct pt_regs *regs) @@ -13466,10 +13492,10 @@ index b9a6f4658..c26219f34 100644 /* diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c -index 35859da8b..dfff31ed6 100644 +index e20c19e3b..777168d58 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c -@@ -138,6 +138,15 @@ KERNEL_ATTR_RO(vmcoreinfo); +@@ -143,6 +143,15 @@ KERNEL_ATTR_RO(vmcoreinfo); #endif /* CONFIG_CRASH_CORE */ @@ -13485,7 +13511,7 @@ index 35859da8b..dfff31ed6 100644 /* whether file capabilities are enabled */ static ssize_t fscaps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) -@@ -228,6 +237,9 @@ static struct attribute * kernel_attrs[] = { +@@ -233,6 +242,9 @@ static struct attribute * kernel_attrs[] = { #ifndef CONFIG_TINY_RCU &rcu_expedited_attr.attr, &rcu_normal_attr.attr, @@ -16105,7 +16131,7 @@ index 000000000..b61edc4dc + __up_write_unlock(sem, 
WRITER_BIAS - 1, flags); +} diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c -index 976b20b2d..51e1085e4 100644 +index cc5cc889b..f7c909ef1 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -28,6 +28,7 @@ @@ -16116,7 +16142,7 @@ index 976b20b2d..51e1085e4 100644 #include "lock_events.h" /* -@@ -1512,6 +1513,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem) +@@ -1494,6 +1495,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem) if (tmp & RWSEM_FLAG_WAITERS) rwsem_downgrade_wake(sem); } @@ -16124,7 +16150,7 @@ index 976b20b2d..51e1085e4 100644 /* * lock for reading -@@ -1675,7 +1677,9 @@ void down_read_non_owner(struct rw_semaphore *sem) +@@ -1657,7 +1659,9 @@ void down_read_non_owner(struct rw_semaphore *sem) { might_sleep(); __down_read(sem); @@ -16134,7 +16160,7 @@ index 976b20b2d..51e1085e4 100644 } EXPORT_SYMBOL(down_read_non_owner); -@@ -1704,7 +1708,9 @@ EXPORT_SYMBOL(down_write_killable_nested); +@@ -1686,7 +1690,9 @@ EXPORT_SYMBOL(down_write_killable_nested); void up_read_non_owner(struct rw_semaphore *sem) { @@ -16384,7 +16410,7 @@ index b1c155328..059c3d876 100644 static inline bool printk_percpu_data_ready(void) { return false; } #endif /* CONFIG_PRINTK */ diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index ecd28d4fa..e95b00f24 100644 +index a1ac84498..45239dcfb 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -44,6 +44,9 @@ @@ -16405,7 +16431,7 @@ index ecd28d4fa..e95b00f24 100644 int console_printk[4] = { CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */ -@@ -227,19 +229,7 @@ static int nr_ext_console_drivers; +@@ -233,19 +235,7 @@ static int nr_ext_console_drivers; static int __down_trylock_console_sem(unsigned long ip) { @@ -16426,7 +16452,7 @@ index ecd28d4fa..e95b00f24 100644 return 1; mutex_acquire(&console_lock_dep_map, 0, 1, ip); return 0; -@@ -248,13 +238,9 @@ static int __down_trylock_console_sem(unsigned long ip) +@@ -254,13 +244,9 @@ static int __down_trylock_console_sem(unsigned long ip) static void __up_console_sem(unsigned long ip) { @@ -16440,7 +16466,7 @@ index ecd28d4fa..e95b00f24 100644 } #define up_console_sem() __up_console_sem(_RET_IP_) -@@ -268,11 +254,6 @@ static void __up_console_sem(unsigned long ip) +@@ -279,11 +265,6 @@ static bool panic_in_progress(void) */ static int console_locked, console_suspended; @@ -16452,7 +16478,7 @@ index ecd28d4fa..e95b00f24 100644 /* * Array of consoles built from command line options (console=) */ -@@ -357,61 +338,43 @@ enum log_flags { +@@ -368,61 +349,43 @@ enum log_flags { LOG_CONT = 8, /* text is a fragment of a continuation line */ }; @@ -16536,7 +16562,7 @@ index ecd28d4fa..e95b00f24 100644 #define LOG_LINE_MAX (1024 - PREFIX_MAX) #define LOG_LEVEL(v) ((v) & 0x07) -@@ -449,11 +412,36 @@ static struct printk_ringbuffer *prb = &printk_rb_static; +@@ -460,11 +423,36 @@ static struct printk_ringbuffer *prb = &printk_rb_static; */ static bool __printk_percpu_data_ready __read_mostly; @@ -16574,7 +16600,7 @@ index ecd28d4fa..e95b00f24 100644 /* Return log buffer address */ char *log_buf_addr_get(void) { -@@ -495,52 +483,6 @@ static void truncate_msg(u16 *text_len, u16 *trunc_msg_len) +@@ -506,52 +494,6 @@ static void truncate_msg(u16 *text_len, u16 *trunc_msg_len) *trunc_msg_len = 0; } @@ -16627,7 +16653,7 @@ index ecd28d4fa..e95b00f24 100644 int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT); static int syslog_action_restricted(int type) -@@ -669,7 +611,7 @@ static ssize_t msg_print_ext_body(char *buf, size_t 
size, +@@ -680,7 +622,7 @@ static ssize_t msg_print_ext_body(char *buf, size_t size, /* /dev/kmsg - userspace message inject/listen interface */ struct devkmsg_user { @@ -16636,7 +16662,7 @@ index ecd28d4fa..e95b00f24 100644 struct ratelimit_state rs; struct mutex lock; char buf[CONSOLE_EXT_LOG_MAX]; -@@ -770,27 +712,22 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, +@@ -781,27 +723,22 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, if (ret) return ret; @@ -16668,7 +16694,7 @@ index ecd28d4fa..e95b00f24 100644 goto out; } -@@ -799,8 +736,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, +@@ -810,8 +747,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, &r->text_buf[0], r->info->text_len, &r->info->dev_info); @@ -16678,7 +16704,7 @@ index ecd28d4fa..e95b00f24 100644 if (len > count) { ret = -EINVAL; -@@ -835,11 +771,10 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) +@@ -846,11 +782,10 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) if (offset) return -ESPIPE; @@ -16691,7 +16717,7 @@ index ecd28d4fa..e95b00f24 100644 break; case SEEK_DATA: /* -@@ -847,16 +782,15 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) +@@ -858,16 +793,15 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) * like issued by 'dmesg -c'. Reading /dev/kmsg itself * changes no global state, and does not clear anything. */ @@ -16710,7 +16736,7 @@ index ecd28d4fa..e95b00f24 100644 return ret; } -@@ -871,15 +805,13 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait) +@@ -882,15 +816,13 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait) poll_wait(file, &log_wait, wait); @@ -16728,7 +16754,7 @@ index ecd28d4fa..e95b00f24 100644 return ret; } -@@ -912,9 +844,7 @@ static int devkmsg_open(struct inode *inode, struct file *file) +@@ -923,9 +855,7 @@ static int devkmsg_open(struct inode *inode, struct file *file) prb_rec_init_rd(&user->record, &user->info, &user->text_buf[0], sizeof(user->text_buf)); @@ -16739,7 +16765,7 @@ index ecd28d4fa..e95b00f24 100644 file->private_data = user; return 0; -@@ -1006,6 +936,9 @@ void log_buf_vmcoreinfo_setup(void) +@@ -1017,6 +947,9 @@ void log_buf_vmcoreinfo_setup(void) VMCOREINFO_SIZE(atomic_long_t); VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter); @@ -16749,7 +16775,7 @@ index ecd28d4fa..e95b00f24 100644 } #endif -@@ -1077,9 +1010,6 @@ static inline void log_buf_add_cpu(void) {} +@@ -1088,9 +1021,6 @@ static inline void log_buf_add_cpu(void) {} static void __init set_percpu_data_ready(void) { @@ -16759,7 +16785,7 @@ index ecd28d4fa..e95b00f24 100644 __printk_percpu_data_ready = true; } -@@ -1119,7 +1049,6 @@ void __init setup_log_buf(int early) +@@ -1130,7 +1060,6 @@ void __init setup_log_buf(int early) struct printk_record r; size_t new_descs_size; size_t new_infos_size; @@ -16767,7 +16793,7 @@ index ecd28d4fa..e95b00f24 100644 char *new_log_buf; unsigned int free; u64 seq; -@@ -1177,8 +1106,6 @@ void __init setup_log_buf(int early) +@@ -1188,8 +1117,6 @@ void __init setup_log_buf(int early) new_descs, ilog2(new_descs_count), new_infos); @@ -16776,7 +16802,7 @@ index ecd28d4fa..e95b00f24 100644 log_buf_len = new_log_buf_len; log_buf = new_log_buf; new_log_buf_len = 0; -@@ -1194,8 +1121,6 @@ void __init setup_log_buf(int early) +@@ -1205,8 +1132,6 @@ void __init setup_log_buf(int early) */ prb = &printk_rb_dynamic; @@ -16785,7 +16811,7 @@ index ecd28d4fa..e95b00f24 
100644 if (seq != prb_next_seq(&printk_rb_static)) { pr_err("dropped %llu messages\n", prb_next_seq(&printk_rb_static) - seq); -@@ -1472,6 +1397,50 @@ static size_t get_record_print_text_size(struct printk_info *info, +@@ -1483,6 +1408,50 @@ static size_t get_record_print_text_size(struct printk_info *info, return ((prefix_len * line_count) + info->text_len + 1); } @@ -16836,7 +16862,7 @@ index ecd28d4fa..e95b00f24 100644 static int syslog_print(char __user *buf, int size) { struct printk_info info; -@@ -1479,19 +1448,19 @@ static int syslog_print(char __user *buf, int size) +@@ -1490,19 +1459,19 @@ static int syslog_print(char __user *buf, int size) char *text; int len = 0; @@ -16860,7 +16886,7 @@ index ecd28d4fa..e95b00f24 100644 break; } if (r.info->seq != syslog_seq) { -@@ -1520,7 +1489,7 @@ static int syslog_print(char __user *buf, int size) +@@ -1531,7 +1500,7 @@ static int syslog_print(char __user *buf, int size) syslog_partial += n; } else n = 0; @@ -16869,7 +16895,7 @@ index ecd28d4fa..e95b00f24 100644 if (!n) break; -@@ -1543,34 +1512,25 @@ static int syslog_print(char __user *buf, int size) +@@ -1554,34 +1523,25 @@ static int syslog_print(char __user *buf, int size) static int syslog_print_all(char __user *buf, int size, bool clear) { struct printk_info info; @@ -16908,7 +16934,7 @@ index ecd28d4fa..e95b00f24 100644 len = 0; prb_for_each_record(seq, prb, seq, &r) { -@@ -1583,20 +1543,20 @@ static int syslog_print_all(char __user *buf, int size, bool clear) +@@ -1594,20 +1554,20 @@ static int syslog_print_all(char __user *buf, int size, bool clear) break; } @@ -16934,7 +16960,7 @@ index ecd28d4fa..e95b00f24 100644 kfree(text); return len; -@@ -1604,9 +1564,21 @@ static int syslog_print_all(char __user *buf, int size, bool clear) +@@ -1615,9 +1575,21 @@ static int syslog_print_all(char __user *buf, int size, bool clear) static void syslog_clear(void) { @@ -16959,7 +16985,7 @@ index ecd28d4fa..e95b00f24 100644 } int do_syslog(int type, char __user *buf, int len, int source) -@@ -1632,8 +1604,9 @@ int do_syslog(int type, char __user *buf, int len, int source) +@@ -1643,8 +1615,9 @@ int do_syslog(int type, char __user *buf, int len, int source) return 0; if (!access_ok(buf, len)) return -EFAULT; @@ -16970,7 +16996,7 @@ index ecd28d4fa..e95b00f24 100644 if (error) return error; error = syslog_print(buf, len); -@@ -1681,10 +1654,10 @@ int do_syslog(int type, char __user *buf, int len, int source) +@@ -1692,10 +1665,10 @@ int do_syslog(int type, char __user *buf, int len, int source) break; /* Number of chars in the log buffer */ case SYSLOG_ACTION_SIZE_UNREAD: @@ -16983,7 +17009,7 @@ index ecd28d4fa..e95b00f24 100644 return 0; } if (info.seq != syslog_seq) { -@@ -1712,7 +1685,7 @@ int do_syslog(int type, char __user *buf, int len, int source) +@@ -1723,7 +1696,7 @@ int do_syslog(int type, char __user *buf, int len, int source) } error -= syslog_partial; } @@ -16992,7 +17018,7 @@ index ecd28d4fa..e95b00f24 100644 break; /* Size of the log buffer */ case SYSLOG_ACTION_SIZE_BUFFER: -@@ -1731,221 +1704,191 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) +@@ -1742,369 +1715,439 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) return do_syslog(type, buf, len, SYSLOG_FROM_READER); } @@ -17001,43 +17027,31 @@ index ecd28d4fa..e95b00f24 100644 - * They allow to pass console_lock to another printk() call using a busy wait. 
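/*
 * The console_lock busy-wait handoff whose comment is removed here is
 * replaced in this patch by per-console printer kthreads (see the
 * printk_kthread_func hunk further down). Reduced to a hedged sketch,
 * the kthread's core loop looks roughly like this (formatting and
 * error handling elided):
 */
static int example_console_kthread(void *data)
{
	u64 seq = 0;

	for (;;) {
		/* Sleep until a record newer than seq is committed. */
		wait_event_interruptible(log_wait,
					 prb_read_valid(prb, seq, NULL));
		if (kthread_should_stop())
			break;
		/* Read, format and write out one record, then advance. */
		seq++;
	}
	return 0;
}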
- */ +int printk_delay_msec __read_mostly; ++ ++static inline void printk_delay(int level) ++{ ++ boot_delay_msec(level); -#ifdef CONFIG_LOCKDEP -static struct lockdep_map console_owner_dep_map = { - .name = "console_owner" -}; -#endif -+static inline void printk_delay(int level) -+{ -+ boot_delay_msec(level); -+ + if (unlikely(printk_delay_msec)) { + int m = printk_delay_msec; - --static DEFINE_RAW_SPINLOCK(console_owner_lock); --static struct task_struct *console_owner; --static bool console_waiter; ++ + while (m--) { + mdelay(1); + touch_nmi_watchdog(); + } + } +} - --#if defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI) --void zap_locks(void) ++ +static bool kernel_sync_mode(void) - { -- if (raw_spin_is_locked(&logbuf_lock)) { -- debug_locks_off(); -- raw_spin_lock_init(&logbuf_lock); -- } ++ { + return (oops_in_progress || sync_mode); +} - -- if (raw_spin_is_locked(&console_owner_lock)) { -- raw_spin_lock_init(&console_owner_lock); -- } ++ +static bool console_can_sync(struct console *con) +{ + if (!(con->flags & CON_ENABLED)) @@ -17050,9 +17064,7 @@ index ecd28d4fa..e95b00f24 100644 + return true; + return false; +} - -- console_owner = NULL; -- console_waiter = false; ++ +static bool call_sync_console_driver(struct console *con, const char *text, size_t text_len) +{ + if (!(con->flags & CON_ENABLED)) @@ -17065,11 +17077,127 @@ index ecd28d4fa..e95b00f24 100644 + con->write(con, text, text_len); + else + return false; ++ ++ return true; ++ } + +-static DEFINE_RAW_SPINLOCK(console_owner_lock); +-static struct task_struct *console_owner; +-static bool console_waiter; + ++static bool have_atomic_console(void) ++ { ++ ++ struct console *con; ++ ++ for_each_console(con) { ++ if (!(con->flags & CON_ENABLED)) ++ continue; ++ if (con->write_atomic) ++ return true; ++ } ++ return false; ++ } ++ ++static bool print_sync(struct console *con, u64 *seq) ++ { ++ struct printk_info info; ++ struct printk_record r; ++ size_t text_len; ++ ++ prb_rec_init_rd(&r, &info, &con->sync_buf[0], sizeof(con->sync_buf)); ++ ++ if (!prb_read_valid(prb, *seq, &r)) ++ return false; ++ ++ text_len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); ++ ++ if (!call_sync_console_driver(con, &con->sync_buf[0], text_len)) ++ return false; ++ ++ *seq = r.info->seq; ++ ++ touch_softlockup_watchdog_sync(); ++ clocksource_touch_watchdog(); ++ rcu_cpu_stall_reset(); ++ touch_nmi_watchdog(); ++ ++ if (text_len) ++ printk_delay(r.info->level); ++ ++ return true; ++} ++ ++static void print_sync_until(struct console *con, u64 seq) ++{ ++ unsigned int flags; ++ u64 printk_seq; ++ ++ console_atomic_lock(&flags); ++ for (;;) { ++ printk_seq = atomic64_read(&con->printk_seq); ++ if (printk_seq >= seq) ++ break; ++ if (!print_sync(con, &printk_seq)) ++ break; ++ atomic64_set(&con->printk_seq, printk_seq + 1); ++ } ++ console_atomic_unlock(flags); ++} ++ + #if defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI) + void zap_locks(void) + { +- if (raw_spin_is_locked(&logbuf_lock)) { +- debug_locks_off(); +- raw_spin_lock_init(&logbuf_lock); +- } ++// if (raw_spin_is_locked(&logbuf_lock)) { ++// debug_locks_off(); ++// raw_spin_lock_init(&logbuf_lock); ++// } ++ ++// if (raw_spin_is_locked(&console_owner_lock)) { ++// raw_spin_lock_init(&console_owner_lock); ++// } + +- if (raw_spin_is_locked(&console_owner_lock)) { +- raw_spin_lock_init(&console_owner_lock); +- } +- +- console_owner = NULL; +- console_waiter = false; ++// console_owner = NULL; ++// console_waiter = false; - sema_init(&console_sem, 1); 
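/*
 * kernel_sync_mode() and console_can_sync() above gate the synchronous
 * output path this patch adds: during an oops or panic, records are
 * written directly from the printk() caller's context, but only via
 * consoles that are safe to call there. An illustrative reduction of
 * the per-console test (booleans stand in for the real flag checks):
 */
static bool example_console_can_sync(bool has_write_atomic, bool has_write,
				     bool oops)
{
	if (has_write_atomic)
		return true;		/* NMI-safe callback, always ok */
	/* A plain write() callback is only trusted before an oops. */
	return has_write && !oops;
}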
-+ return true; - } --#endif +-} ++// sema_init(&console_sem, 1); ++ } ++#endif ++ ++#ifdef CONFIG_PRINTK_NMI ++#define NUM_RECURSION_CTX 2 ++#else ++#define NUM_RECURSION_CTX 1 + #endif ++ ++struct printk_recursion { ++ char count[NUM_RECURSION_CTX]; ++}; ++ ++static DEFINE_PER_CPU(struct printk_recursion, percpu_printk_recursion); ++static char printk_recursion_count[NUM_RECURSION_CTX]; ++ ++static char *printk_recursion_counter(void) ++{ ++ struct printk_recursion *rec; ++ char *count; ++ ++ if (!printk_percpu_data_ready()) { ++ count = &printk_recursion_count[0]; ++ } else { ++ rec = this_cpu_ptr(&percpu_printk_recursion); -/** - * console_lock_spinning_enable - mark beginning of code where another @@ -17081,23 +17209,16 @@ index ecd28d4fa..e95b00f24 100644 - * ready to hand over the lock at the end of the section. - */ -static void console_lock_spinning_enable(void) -+static bool have_atomic_console(void) - { +-{ - raw_spin_lock(&console_owner_lock); - console_owner = current; - raw_spin_unlock(&console_owner_lock); -+ struct console *con; ++ count = &rec->count[0]; ++ } - /* The waiter may spin on us after setting console_owner */ - spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_); -+ for_each_console(con) { -+ if (!(con->flags & CON_ENABLED)) -+ continue; -+ if (con->write_atomic) -+ return true; -+ } -+ return false; - } +-} -/** - * console_lock_spinning_disable_and_check - mark end of code where another @@ -17115,33 +17236,52 @@ index ecd28d4fa..e95b00f24 100644 - * Return: 1 if the lock rights were passed, 0 otherwise. - */ -static int console_lock_spinning_disable_and_check(void) -+static bool print_sync(struct console *con, u64 *seq) - { +-{ - int waiter; -+ struct printk_info info; -+ struct printk_record r; -+ size_t text_len; ++#ifdef CONFIG_PRINTK_NMI ++ if (in_nmi()) ++ count++; ++#endif ++ ++ return count; ++} ++ ++static bool printk_enter_irqsave(unsigned long *flags) ++ { - raw_spin_lock(&console_owner_lock); - waiter = READ_ONCE(console_waiter); - console_owner = NULL; - raw_spin_unlock(&console_owner_lock); -+ prb_rec_init_rd(&r, &info, &con->sync_buf[0], sizeof(con->sync_buf)); ++ char *count; ++ ++ local_irq_save(*flags); ++ count = printk_recursion_counter(); ++ /* Only 1 level of recursion allowed. */ ++ if (*count > 1) { ++ local_irq_restore(*flags); ++ return false; ++ } ++ (*count)++; - if (!waiter) { - spin_release(&console_owner_dep_map, _THIS_IP_); - return 0; - } -+ if (!prb_read_valid(prb, *seq, &r)) -+ return false; ++ return true; ++} - /* The waiter is now free to continue */ - WRITE_ONCE(console_waiter, false); -+ text_len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); ++static void printk_exit_irqrestore(unsigned long flags) ++{ ++ char *count; - spin_release(&console_owner_dep_map, _THIS_IP_); -+ if (!call_sync_console_driver(con, &con->sync_buf[0], text_len)) -+ return false; ++ count = printk_recursion_counter(); ++ (*count)--; ++ local_irq_restore(flags); ++ } - /* - * Hand off console_lock to waiter. The waiter will perform @@ -17149,39 +17289,134 @@ index ecd28d4fa..e95b00f24 100644 - */ - mutex_release(&console_lock_dep_map, _THIS_IP_); - return 1; --} -+ *seq = r.info->seq; ++static inline u32 printk_caller_id(void) ++{ ++ return in_task() ? task_pid_nr(current) : ++ 0x80000000 + raw_smp_processor_id(); + } --/** ++ + /** - * console_trylock_spinning - try to get console_lock by busy waiting -- * ++ * parse_prefix - Parse level and control flags. ++ * ++ * @text: The terminated text message. 
++ * @level: A pointer to the current level value, will be updated. ++ * @lflags: A pointer to the current log flags, will be updated. + * - * This allows to busy wait for the console_lock when the current - * owner is running in specially marked sections. It means that - * the current owner is running and cannot reschedule until it - * is ready to lose the lock. -- * ++ * @level may be NULL if the caller is not interested in the parsed value. ++ * Otherwise the variable pointed to by @level must be set to ++ * LOGLEVEL_DEFAULT in order to be updated with the parsed value. + * - * Return: 1 if we got the lock, 0 othrewise -- */ ++ * @lflags may be NULL if the caller is not interested in the parsed value. ++ * Otherwise the variable pointed to by @lflags will be OR'd with the parsed ++ * value. ++ * ++ * Return: The length of the parsed level and control flags. + */ -static int console_trylock_spinning(void) --{ ++static u16 parse_prefix(char *text, int *level, enum log_flags *lflags) ++ { ++ u16 prefix_len = 0; ++ int kern_level; ++ ++ while (*text) { ++ kern_level = printk_get_level(text); ++ if (!kern_level) ++ break; ++ ++ switch (kern_level) { ++ case '0' ... '7': ++ if (level && *level == LOGLEVEL_DEFAULT) ++ *level = kern_level - '0'; ++ break; ++ case 'c': /* KERN_CONT */ ++ if (lflags) ++ *lflags |= LOG_CONT; ++ } ++ ++ prefix_len += 2; ++ text += 2; ++ } ++ ++ return prefix_len; ++ } ++ ++static u16 printk_sprint(char *text, u16 size, int facility, enum log_flags *lflags, ++ const char *fmt, va_list args) ++ { ++ u16 text_len; ++ ++ text_len = vscnprintf(text, size, fmt, args); ++ ++ /* Mark and strip a trailing newline. */ ++ if (text_len && text[text_len - 1] == '\n') { ++ text_len--; ++ *lflags |= LOG_NEWLINE; ++ } ++ ++ /* Strip log level and control flags. */ ++ if (facility == 0) { ++ u16 prefix_len; ++ ++ prefix_len = parse_prefix(text, NULL, NULL); ++ if (prefix_len) { ++ text_len -= prefix_len; ++ memmove(text, text + prefix_len, text_len); ++ } ++ } ++ ++ return text_len; ++ } ++ ++__printf(4, 0) ++static int vprintk_store(int facility, int level, ++ const struct dev_printk_info *dev_info, ++ const char *fmt, va_list args) + { - struct task_struct *owner = NULL; - bool waiter; - bool spin = false; - unsigned long flags; -+ touch_softlockup_watchdog_sync(); -+ clocksource_touch_watchdog(); -+ rcu_cpu_stall_reset(); -+ touch_nmi_watchdog(); - +- - if (console_trylock()) - return 1; -+ if (text_len) -+ printk_delay(r.info->level); ++ const u32 caller_id = printk_caller_id(); ++ struct prb_reserved_entry e; ++ enum log_flags lflags = 0; ++ bool final_commit = false; ++ struct printk_record r; ++ unsigned long irqflags; ++ u16 trunc_msg_len = 0; ++ char prefix_buf[8]; ++ u16 reserve_size; ++ va_list args2; ++ u16 text_len; ++ int ret = 0; ++ u64 ts_nsec; ++ u64 seq; + /* +- * It's unsafe to spin once a panic has begun. If we are the +- * panic CPU, we may have already halted the owner of the +- * console_sem. If we are not the panic CPU, then we should +- * avoid taking console_sem, so the panic CPU has a better +- * chance of cleanly acquiring it later. ++ * Since the duration of printk() can vary depending on the message ++ * and state of the ringbuffer, grab the timestamp now so that it is ++ * close to the call of printk(). This provides a more deterministic ++ * timestamp with respect to the caller. 
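/*
 * The printk_enter_irqsave()/printk_exit_irqrestore() pair above
 * replaces the old printk_safe machinery with a small per-context
 * recursion counter. A minimal sketch of the protocol, collapsed to a
 * single counter for brevity (the real code keeps one counter per CPU
 * plus a separate slot for NMI context):
 */
static char example_recursion_count;

static bool example_printk_enter(unsigned long *flags)
{
	local_irq_save(*flags);
	/* Allow exactly one nested printk() per context. */
	if (example_recursion_count > 1) {
		local_irq_restore(*flags);
		return false;
	}
	example_recursion_count++;
	return true;
}

static void example_printk_exit(unsigned long flags)
{
	example_recursion_count--;
	local_irq_restore(flags);
}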
+ */ +- if (panic_in_progress()) +- return 0; +- - printk_safe_enter_irqsave(flags); -+ return true; -+} - +- - raw_spin_lock(&console_owner_lock); - owner = READ_ONCE(console_owner); - waiter = READ_ONCE(console_waiter); @@ -17190,10 +17425,7 @@ index ecd28d4fa..e95b00f24 100644 - spin = true; - } - raw_spin_unlock(&console_owner_lock); -+static void print_sync_until(struct console *con, u64 seq) -+{ -+ unsigned int flags; -+ u64 printk_seq; ++ ts_nsec = local_clock(); - /* - * If there is an active printk() writing to the @@ -17206,18 +17438,9 @@ index ecd28d4fa..e95b00f24 100644 - */ - if (!spin) { - printk_safe_exit_irqrestore(flags); -- return 0; -+ console_atomic_lock(&flags); -+ for (;;) { -+ printk_seq = atomic64_read(&con->printk_seq); -+ if (printk_seq >= seq) -+ break; -+ if (!print_sync(con, &printk_seq)) -+ break; -+ atomic64_set(&con->printk_seq, printk_seq + 1); - } -+ console_atomic_unlock(flags); -+} ++ if (!printk_enter_irqsave(&irqflags)) + return 0; +- } - /* We spin waiting for the owner to release us */ - spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_); @@ -17225,34 +17448,23 @@ index ecd28d4fa..e95b00f24 100644 - while (READ_ONCE(console_waiter)) - cpu_relax(); - spin_release(&console_owner_dep_map, _THIS_IP_); -+#if defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI) -+void zap_locks(void) -+{ -+// if (raw_spin_is_locked(&logbuf_lock)) { -+// debug_locks_off(); -+// raw_spin_lock_init(&logbuf_lock); -+// } - +- - printk_safe_exit_irqrestore(flags); -- /* + /* - * The owner passed the console lock to us. - * Since we did not spin on console lock, annotate - * this as a trylock. Otherwise lockdep will - * complain. -- */ ++ * The sprintf needs to come first since the syslog prefix might be ++ * passed in as a parameter. An extra byte must be reserved so that ++ * later the vscnprintf() into the reserved buffer has room for the ++ * terminating '\0', which is not counted by vsnprintf(). + */ - mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_); -+// if (raw_spin_is_locked(&console_owner_lock)) { -+// raw_spin_lock_init(&console_owner_lock); -+// } - +- - return 1; -+// console_owner = NULL; -+// console_waiter = false; -+ -+// sema_init(&console_sem, 1); - } -+#endif - +-} +- -/* - * Call the console drivers, asking them to write out - * log_buf[start] to log_buf[end - 1]. 
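/*
 * The "sprintf needs to come first" comment above is implemented just
 * below with the standard two-pass vsnprintf() idiom: a first pass
 * into a small scratch buffer yields the untruncated length, which
 * then sizes the ringbuffer reservation before formatting for real.
 * Sketch of the sizing pass only (illustrative):
 */
static int example_reserve_size(const char *fmt, va_list args)
{
	char scratch[8];	/* also captures a possible log prefix */
	va_list args2;
	int len;

	va_copy(args2, args);
	/* vsnprintf() returns the would-be length; +1 for the '\0'. */
	len = vsnprintf(scratch, sizeof(scratch), fmt, args2) + 1;
	va_end(args2);

	return len;
}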
@@ -17264,33 +17476,19 @@ index ecd28d4fa..e95b00f24 100644 - static char dropped_text[64]; - size_t dropped_len = 0; - struct console *con; -+#ifdef CONFIG_PRINTK_NMI -+#define NUM_RECURSION_CTX 2 -+#else -+#define NUM_RECURSION_CTX 1 -+#endif - +- - trace_console_rcuidle(text, len); -+struct printk_recursion { -+ char count[NUM_RECURSION_CTX]; -+}; - +- - if (!console_drivers) - return; -+static DEFINE_PER_CPU(struct printk_recursion, percpu_printk_recursion); -+static char printk_recursion_count[NUM_RECURSION_CTX]; - +- - if (console_dropped) { - dropped_len = snprintf(dropped_text, sizeof(dropped_text), - "** %lu printk messages dropped **\n", - console_dropped); - console_dropped = 0; - } -+static char *printk_recursion_counter(void) -+{ -+ struct printk_recursion *rec; -+ char *count; - +- - for_each_console(con) { - if (exclusive_console && con != exclusive_console) - continue; @@ -17308,158 +17506,106 @@ index ecd28d4fa..e95b00f24 100644 - con->write(con, dropped_text, dropped_len); - con->write(con, text, len); - } -+ if (!printk_percpu_data_ready()) { -+ count = &printk_recursion_count[0]; -+ } else { -+ rec = this_cpu_ptr(&percpu_printk_recursion); -+ -+ count = &rec->count[0]; - } +- } -} - +- -int printk_delay_msec __read_mostly; -+#ifdef CONFIG_PRINTK_NMI -+ if (in_nmi()) -+ count++; -+#endif -+ -+ return count; -+} ++ va_copy(args2, args); ++ reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1; ++ va_end(args2); -static inline void printk_delay(void) -+static bool printk_enter_irqsave(unsigned long *flags) - { +-{ - if (unlikely(printk_delay_msec)) { - int m = printk_delay_msec; -+ char *count; ++ if (reserve_size > LOG_LINE_MAX) ++ reserve_size = LOG_LINE_MAX; - while (m--) { - mdelay(1); - touch_nmi_watchdog(); - } -+ local_irq_save(*flags); -+ count = printk_recursion_counter(); -+ /* Only 1 level of recursion allowed. */ -+ if (*count > 1) { -+ local_irq_restore(*flags); -+ return false; - } -+ (*count)++; -+ -+ return true; -+} -+ -+static void printk_exit_irqrestore(unsigned long flags) -+{ -+ char *count; -+ -+ count = printk_recursion_counter(); -+ (*count)--; -+ local_irq_restore(flags); - } +- } +-} ++ /* Extract log level or control flags. */ ++ if (facility == 0) ++ parse_prefix(&prefix_buf[0], &level, &lflags); - static inline u32 printk_caller_id(void) -@@ -1954,144 +1897,248 @@ static inline u32 printk_caller_id(void) - 0x80000000 + raw_smp_processor_id(); - } +-static inline u32 printk_caller_id(void) +-{ +- return in_task() ? task_pid_nr(current) : +- 0x80000000 + raw_smp_processor_id(); +-} ++ if (level == LOGLEVEL_DEFAULT) ++ level = default_message_loglevel; -static size_t log_output(int facility, int level, enum log_flags lflags, - const struct dev_printk_info *dev_info, - char *text, size_t text_len) -+/** -+ * parse_prefix - Parse level and control flags. -+ * -+ * @text: The terminated text message. -+ * @level: A pointer to the current level value, will be updated. -+ * @lflags: A pointer to the current log flags, will be updated. -+ * -+ * @level may be NULL if the caller is not interested in the parsed value. -+ * Otherwise the variable pointed to by @level must be set to -+ * LOGLEVEL_DEFAULT in order to be updated with the parsed value. -+ * -+ * @lflags may be NULL if the caller is not interested in the parsed value. -+ * Otherwise the variable pointed to by @lflags will be OR'd with the parsed -+ * value. -+ * -+ * Return: The length of the parsed level and control flags. 
-+ */ -+static u16 parse_prefix(char *text, int *level, enum log_flags *lflags) - { +-{ - const u32 caller_id = printk_caller_id(); -+ u16 prefix_len = 0; -+ int kern_level; ++ if (dev_info) ++ lflags |= LOG_NEWLINE; -- if (lflags & LOG_CONT) { + if (lflags & LOG_CONT) { - struct prb_reserved_entry e; - struct printk_record r; -+ while (*text) { -+ kern_level = printk_get_level(text); -+ if (!kern_level) -+ break; - +- - prb_rec_init_wr(&r, text_len); -- if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) { ++ prb_rec_init_wr(&r, reserve_size); + if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) { - memcpy(&r.text_buf[r.info->text_len], text, text_len); -- r.info->text_len += text_len; -- if (lflags & LOG_NEWLINE) { -- r.info->flags |= LOG_NEWLINE; -- prb_final_commit(&e); -- } else { -- prb_commit(&e); -- } -- return text_len; -+ switch (kern_level) { -+ case '0' ... '7': -+ if (level && *level == LOGLEVEL_DEFAULT) -+ *level = kern_level - '0'; -+ break; -+ case 'c': /* KERN_CONT */ -+ if (lflags) -+ *lflags |= LOG_CONT; - } ++ seq = r.info->seq; ++ text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size, ++ facility, &lflags, fmt, args); + r.info->text_len += text_len; + -+ prefix_len += 2; -+ text += 2; + if (lflags & LOG_NEWLINE) { + r.info->flags |= LOG_NEWLINE; + prb_final_commit(&e); ++ final_commit = true; + } else { + prb_commit(&e); + } +- return text_len; ++ ++ ret = text_len; ++ goto out; + } } - /* Store it in the record log */ - return log_store(caller_id, facility, level, lflags, 0, - dev_info, text, text_len); -+ return prefix_len; - } - +-} +- -/* Must be called under logbuf_lock. */ -int vprintk_store(int facility, int level, - const struct dev_printk_info *dev_info, - const char *fmt, va_list args) -+static u16 printk_sprint(char *text, u16 size, int facility, enum log_flags *lflags, -+ const char *fmt, va_list args) - { +-{ - static char textbuf[LOG_LINE_MAX]; - char *text = textbuf; - size_t text_len; - enum log_flags lflags = 0; -+ u16 text_len; - -- /* +- + /* - * The printf needs to come first; we need the syslog - * prefix which might be passed-in as a parameter. -- */ ++ * Explicitly initialize the record before every prb_reserve() call. ++ * prb_reserve_in_last() and prb_reserve() purposely invalidate the ++ * structure when they fail. + */ - text_len = vscnprintf(text, sizeof(textbuf), fmt, args); -+ text_len = vscnprintf(text, size, fmt, args); - +- - /* mark and strip a trailing newline */ - if (text_len && text[text_len-1] == '\n') { -+ /* Mark and strip a trailing newline. */ -+ if (text_len && text[text_len - 1] == '\n') { - text_len--; +- text_len--; - lflags |= LOG_NEWLINE; -+ *lflags |= LOG_NEWLINE; - } - +- } +- - /* strip kernel syslog prefix and extract log level or control flags */ -+ /* Strip log level and control flags. 
*/ - if (facility == 0) { +- if (facility == 0) { - int kern_level; - - while ((kern_level = printk_get_level(text)) != 0) { @@ -17471,120 +17617,23 @@ index ecd28d4fa..e95b00f24 100644 - case 'c': /* KERN_CONT */ - lflags |= LOG_CONT; - } -+ u16 prefix_len; - -- text_len -= 2; -- text += 2; -+ prefix_len = parse_prefix(text, NULL, NULL); -+ if (prefix_len) { -+ text_len -= prefix_len; -+ memmove(text, text + prefix_len, text_len); - } - } - -- if (level == LOGLEVEL_DEFAULT) -- level = default_message_loglevel; -- -- if (dev_info) -- lflags |= LOG_NEWLINE; -- -- return log_output(facility, level, lflags, dev_info, text, text_len); -+ return text_len; - } - --asmlinkage int vprintk_emit(int facility, int level, -+__printf(4, 0) -+static int vprintk_store(int facility, int level, -+ const struct dev_printk_info *dev_info, -+ const char *fmt, va_list args) -+{ -+ const u32 caller_id = printk_caller_id(); -+ struct prb_reserved_entry e; -+ enum log_flags lflags = 0; -+ bool final_commit = false; -+ struct printk_record r; -+ unsigned long irqflags; -+ u16 trunc_msg_len = 0; -+ char prefix_buf[8]; -+ u16 reserve_size; -+ va_list args2; -+ u16 text_len; -+ int ret = 0; -+ u64 ts_nsec; -+ u64 seq; -+ -+ /* -+ * Since the duration of printk() can vary depending on the message -+ * and state of the ringbuffer, grab the timestamp now so that it is -+ * close to the call of printk(). This provides a more deterministic -+ * timestamp with respect to the caller. -+ */ -+ ts_nsec = local_clock(); -+ -+ if (!printk_enter_irqsave(&irqflags)) -+ return 0; -+ -+ /* -+ * The sprintf needs to come first since the syslog prefix might be -+ * passed in as a parameter. An extra byte must be reserved so that -+ * later the vscnprintf() into the reserved buffer has room for the -+ * terminating '\0', which is not counted by vsnprintf(). -+ */ -+ va_copy(args2, args); -+ reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1; -+ va_end(args2); -+ -+ if (reserve_size > LOG_LINE_MAX) -+ reserve_size = LOG_LINE_MAX; -+ -+ /* Extract log level or control flags. */ -+ if (facility == 0) -+ parse_prefix(&prefix_buf[0], &level, &lflags); -+ -+ if (level == LOGLEVEL_DEFAULT) -+ level = default_message_loglevel; -+ -+ if (dev_info) -+ lflags |= LOG_NEWLINE; -+ -+ if (lflags & LOG_CONT) { -+ prb_rec_init_wr(&r, reserve_size); -+ if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) { -+ seq = r.info->seq; -+ text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size, -+ facility, &lflags, fmt, args); -+ r.info->text_len += text_len; -+ -+ if (lflags & LOG_NEWLINE) { -+ r.info->flags |= LOG_NEWLINE; -+ prb_final_commit(&e); -+ final_commit = true; -+ } else { -+ prb_commit(&e); -+ } -+ -+ ret = text_len; -+ goto out; -+ } -+ } -+ -+ /* -+ * Explicitly initialize the record before every prb_reserve() call. -+ * prb_reserve_in_last() and prb_reserve() purposely invalidate the -+ * structure when they fail. 
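/*
 * parse_prefix(), used in the hunks above, strips "\001<c>" (KERN_SOH)
 * sequences from the head of a message; each prefix is exactly two
 * bytes. A condensed sketch of the level-extraction loop, assuming
 * printk_get_level() semantics (returns the level character, or 0 when
 * no prefix remains); KERN_CONT handling is elided:
 */
static unsigned short example_parse_prefix(const char *text, int *level)
{
	unsigned short prefix_len = 0;
	int kern_level;

	while ((kern_level = printk_get_level(text)) != 0) {
		if (kern_level >= '0' && kern_level <= '7' &&
		    *level == LOGLEVEL_DEFAULT)
			*level = kern_level - '0';	/* first one wins */
		prefix_len += 2;
		text += 2;
	}
	return prefix_len;
}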
-+ */ + prb_rec_init_wr(&r, reserve_size); + if (!prb_reserve(&e, prb, &r)) { + /* truncate the message if it is too long for empty buffer */ + truncate_msg(&reserve_size, &trunc_msg_len); -+ + +- text_len -= 2; +- text += 2; +- } + prb_rec_init_wr(&r, reserve_size + trunc_msg_len); + if (!prb_reserve(&e, prb, &r)) + goto out; -+ } -+ + } + +- if (level == LOGLEVEL_DEFAULT) +- level = default_message_loglevel; + seq = r.info->seq; -+ + + /* fill message */ + text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &lflags, fmt, args); + if (trunc_msg_len) @@ -17595,59 +17644,53 @@ index ecd28d4fa..e95b00f24 100644 + r.info->flags = lflags & 0x1f; + r.info->ts_nsec = ts_nsec; + r.info->caller_id = caller_id; -+ if (dev_info) + if (dev_info) +- lflags |= LOG_NEWLINE; +- +- return log_output(facility, level, lflags, dev_info, text, text_len); +-} +- +-asmlinkage int vprintk_emit(int facility, int level, +- const struct dev_printk_info *dev_info, +- const char *fmt, va_list args) +-{ +- int printed_len; +- bool in_sched = false; +- unsigned long flags; +- +- /* Suppress unimportant messages after panic happens */ +- if (unlikely(suppress_printk)) +- return 0; +- +- if (unlikely(suppress_panic_printk) && +- atomic_read(&panic_cpu) != raw_smp_processor_id()) +- return 0; + memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info)); -+ + +- if (level == LOGLEVEL_SCHED) { +- level = LOGLEVEL_DEFAULT; +- in_sched = true; + /* A message without a trailing newline can be continued. */ + if (!(lflags & LOG_NEWLINE)) { + prb_commit(&e); + } else { + prb_final_commit(&e); + final_commit = true; -+ } -+ + } + +- boot_delay_msec(level); +- printk_delay(); +- +- /* This stops the holder of console_sem just where we want him */ +- logbuf_lock_irqsave(flags); +- printed_len = vprintk_store(facility, level, dev_info, fmt, args); +- logbuf_unlock_irqrestore(flags); + ret = text_len + trunc_msg_len; +out: + /* only the kernel may perform synchronous printing */ + if (facility == 0 && final_commit) { + struct console *con; -+ -+ for_each_console(con) { -+ if (console_can_sync(con)) -+ print_sync_until(con, seq + 1); -+ } -+ } -+ -+ printk_exit_irqrestore(irqflags); -+ return ret; -+} -+ -+asmlinkage int vprintk_emit(int facility, int level, - const struct dev_printk_info *dev_info, - const char *fmt, va_list args) - { - int printed_len; -- bool in_sched = false; -- unsigned long flags; - /* Suppress unimportant messages after panic happens */ - if (unlikely(suppress_printk)) - return 0; - -- if (level == LOGLEVEL_SCHED) { -+ if (level == LOGLEVEL_SCHED) - level = LOGLEVEL_DEFAULT; -- in_sched = true; -- } -- -- boot_delay_msec(level); -- printk_delay(); - -- /* This stops the holder of console_sem just where we want him */ -- logbuf_lock_irqsave(flags); - printed_len = vprintk_store(facility, level, dev_info, fmt, args); -- logbuf_unlock_irqrestore(flags); -- - /* If called from the scheduler, we can not call up(). 
*/ - if (!in_sched) { - /* @@ -17664,45 +17707,71 @@ index ecd28d4fa..e95b00f24 100644 - if (console_trylock_spinning()) - console_unlock(); - preempt_enable(); -- } ++ for_each_console(con) { ++ if (console_can_sync(con)) ++ print_sync_until(con, seq + 1); ++ } + } - wake_up_klogd(); - return printed_len; +- wake_up_klogd(); +- return printed_len; ++ printk_exit_irqrestore(irqflags); ++ return ret; } - EXPORT_SYMBOL(vprintk_emit); - --asmlinkage int vprintk(const char *fmt, va_list args) +-EXPORT_SYMBOL(vprintk_emit); ++ ++asmlinkage int vprintk_emit(int facility, int level, ++ const struct dev_printk_info *dev_info, ++ const char *fmt, va_list args) ++ { ++ int printed_len; ++ ++ /* Suppress unimportant messages after panic happens */ ++ if (unlikely(suppress_printk)) ++ return 0; ++ ++ if (level == LOGLEVEL_SCHED) ++ level = LOGLEVEL_DEFAULT; ++ ++ ++ wake_up_klogd(); ++ return printed_len; ++ } ++ EXPORT_SYMBOL(vprintk_emit); ++ +__printf(1, 0) +static int vprintk_default(const char *fmt, va_list args) - { -- return vprintk_func(fmt, args); ++ { + return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args); - } --EXPORT_SYMBOL(vprintk); - --int vprintk_default(const char *fmt, va_list args) ++ } ++ +__printf(1, 0) +static int vprintk_func(const char *fmt, va_list args) - { -- return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args); ++ { +#ifdef CONFIG_KGDB_KDB + /* Allow to pass printk() to kdb but avoid a recursion. */ + if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0)) + return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args); +#endif + return vprintk_default(fmt, args); - } --EXPORT_SYMBOL_GPL(vprintk_default); -+ -+asmlinkage int vprintk(const char *fmt, va_list args) -+{ -+ return vprintk_func(fmt, args); -+} -+EXPORT_SYMBOL(vprintk); ++ } + asmlinkage int vprintk(const char *fmt, va_list args) + { +@@ -2112,12 +2155,6 @@ asmlinkage int vprintk(const char *fmt, va_list args) + } + EXPORT_SYMBOL(vprintk); + +-int vprintk_default(const char *fmt, va_list args) +-{ +- return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args); +-} +-EXPORT_SYMBOL_GPL(vprintk_default); +- /** * printk - print a kernel message -@@ -2127,38 +2174,158 @@ asmlinkage __visible int printk(const char *fmt, ...) + * @fmt: format string +@@ -2152,38 +2189,158 @@ asmlinkage __visible int printk(const char *fmt, ...) 
} EXPORT_SYMBOL(printk); @@ -17723,10 +17792,7 @@ index ecd28d4fa..e95b00f24 100644 + size_t len; + int error; + u64 seq; - --#define LOG_LINE_MAX 0 --#define PREFIX_MAX 0 --#define printk_time false ++ + if (con->flags & CON_EXTENDED) { + ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL); + if (!ext_text) @@ -17737,19 +17803,22 @@ index ecd28d4fa..e95b00f24 100644 + if (!text || !dropped_text) + goto out; --#define prb_read_valid(rb, seq, r) false --#define prb_first_valid_seq(rb) 0 +-#define LOG_LINE_MAX 0 +-#define PREFIX_MAX 0 +-#define printk_time false + if (con->flags & CON_EXTENDED) + write_text = ext_text; + else + write_text = text; +-#define prb_read_valid(rb, seq, r) false +-#define prb_first_valid_seq(rb) 0 ++ seq = atomic64_read(&con->printk_seq); + -static u64 syslog_seq; -static u64 console_seq; -static u64 exclusive_console_stop_seq; -static unsigned long console_dropped; -+ seq = atomic64_read(&con->printk_seq); -+ + prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX); + + for (;;) { @@ -17758,9 +17827,7 @@ index ecd28d4fa..e95b00f24 100644 + + if (kthread_should_stop()) + break; - --static size_t record_print_text(const struct printk_record *r, -- bool syslog, bool time) ++ + if (error) + continue; + @@ -17795,7 +17862,9 @@ index ecd28d4fa..e95b00f24 100644 + + console_lock(); + console_may_schedule = 0; -+ + +-static size_t record_print_text(const struct printk_record *r, +- bool syslog, bool time) + if (kernel_sync_mode() && con->write_atomic) { + console_unlock(); + break; @@ -17885,7 +17954,7 @@ index ecd28d4fa..e95b00f24 100644 #endif /* CONFIG_PRINTK */ -@@ -2403,34 +2570,6 @@ int is_console_locked(void) +@@ -2428,21 +2585,6 @@ int is_console_locked(void) } EXPORT_SYMBOL(is_console_locked); @@ -17904,7 +17973,13 @@ index ecd28d4fa..e95b00f24 100644 - - return 0; -} -- + + /* + * Return true when this CPU should unlock console_sem without pushing all +@@ -2463,17 +2605,6 @@ static bool abandon_console_lock_in_panic(void) + return atomic_read(&panic_cpu) != raw_smp_processor_id(); + } + -/* - * Can we actually use the console at this time on this cpu? - * @@ -17916,26 +17991,27 @@ index ecd28d4fa..e95b00f24 100644 -{ - return cpu_online(raw_smp_processor_id()) || have_callable_console(); -} -- + /** * console_unlock - unlock the console system - * -@@ -2447,142 +2586,14 @@ static inline int can_use_console(void) +@@ -2490,153 +2621,14 @@ static inline int can_use_console(void) + * console_unlock(); may be called from any context. */ void console_unlock(void) - { +-{ - static char ext_text[CONSOLE_EXT_LOG_MAX]; - static char text[LOG_LINE_MAX + PREFIX_MAX]; +- static int panic_console_dropped; - unsigned long flags; - bool do_cond_resched, retry; - struct printk_info info; - struct printk_record r; - - if (console_suspended) { - up_console_sem(); - return; - } - +- if (console_suspended) { +- up_console_sem(); +- return; +- } +- - prb_rec_init_rd(&r, &info, text, sizeof(text)); - - /* @@ -17980,6 +18056,10 @@ index ecd28d4fa..e95b00f24 100644 - if (console_seq != r.info->seq) { - console_dropped += r.info->seq - console_seq; - console_seq = r.info->seq; +- if (panic_in_progress() && panic_console_dropped++ > 10) { +- suppress_panic_printk = 1; +- pr_warn_once("Too many dropped messages. 
Supress message on non-panic CPUs to prevent livelock.\n"); +- } - } - - if (suppress_message_printing(r.info->level)) { @@ -18037,15 +18117,19 @@ index ecd28d4fa..e95b00f24 100644 - - printk_safe_exit_irqrestore(flags); - +- /* Allow panic_cpu to take over the consoles safely */ +- if (abandon_console_lock_in_panic()) +- break; +- - if (do_cond_resched) - cond_resched(); - } - - console_locked = 0; - +- console_locked = 0; +- - raw_spin_unlock(&logbuf_lock); - - up_console_sem(); +- up_console_sem(); - - /* - * Someone could have filled up the buffer again, so re-check if there's @@ -18058,12 +18142,21 @@ index ecd28d4fa..e95b00f24 100644 - raw_spin_unlock(&logbuf_lock); - printk_safe_exit_irqrestore(flags); - -- if (retry && console_trylock()) +- if (retry && !abandon_console_lock_in_panic() && console_trylock()) - goto again; - } +-} ++ { ++ if (console_suspended) { ++ up_console_sem(); ++ return; ++ } ++ console_locked = 0; ++ up_console_sem(); ++ } EXPORT_SYMBOL(console_unlock); -@@ -2632,23 +2643,20 @@ void console_unblank(void) + /** +@@ -2685,23 +2677,20 @@ void console_unblank(void) */ void console_flush_on_panic(enum con_flush_mode mode) { @@ -18098,7 +18191,7 @@ index ecd28d4fa..e95b00f24 100644 console_unlock(); } EXPORT_SYMBOL(console_flush_on_panic); -@@ -2784,7 +2792,6 @@ static int try_enable_new_console(struct console *newcon, bool user_specified) +@@ -2837,7 +2826,6 @@ static int try_enable_new_console(struct console *newcon, bool user_specified) */ void register_console(struct console *newcon) { @@ -18106,7 +18199,7 @@ index ecd28d4fa..e95b00f24 100644 struct console *bcon = NULL; int err; -@@ -2808,6 +2815,8 @@ void register_console(struct console *newcon) +@@ -2861,6 +2849,8 @@ void register_console(struct console *newcon) } } @@ -18115,7 +18208,7 @@ index ecd28d4fa..e95b00f24 100644 if (console_drivers && console_drivers->flags & CON_BOOT) bcon = console_drivers; -@@ -2849,8 +2858,10 @@ void register_console(struct console *newcon) +@@ -2902,8 +2892,10 @@ void register_console(struct console *newcon) * the real console are the same physical device, it's annoying to * see the beginning boot messages twice */ @@ -18127,7 +18220,7 @@ index ecd28d4fa..e95b00f24 100644 /* * Put this console in the list - keep the -@@ -2872,26 +2883,12 @@ void register_console(struct console *newcon) +@@ -2925,26 +2917,12 @@ void register_console(struct console *newcon) if (newcon->flags & CON_EXTENDED) nr_ext_console_drivers++; @@ -18160,7 +18253,7 @@ index ecd28d4fa..e95b00f24 100644 console_unlock(); console_sysfs_notify(); -@@ -2965,6 +2962,9 @@ int unregister_console(struct console *console) +@@ -3018,6 +2996,9 @@ int unregister_console(struct console *console) console_unlock(); console_sysfs_notify(); @@ -18170,7 +18263,7 @@ index ecd28d4fa..e95b00f24 100644 if (console->exit) res = console->exit(console); -@@ -3047,6 +3047,15 @@ static int __init printk_late_init(void) +@@ -3100,6 +3081,15 @@ static int __init printk_late_init(void) unregister_console(con); } } @@ -18186,7 +18279,7 @@ index ecd28d4fa..e95b00f24 100644 ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL, console_cpu_notify); WARN_ON(ret < 0); -@@ -3062,7 +3071,6 @@ late_initcall(printk_late_init); +@@ -3115,7 +3105,6 @@ late_initcall(printk_late_init); * Delayed printk version, for scheduler-internal messages: */ #define PRINTK_PENDING_WAKEUP 0x01 @@ -18194,7 +18287,7 @@ index ecd28d4fa..e95b00f24 100644 static DEFINE_PER_CPU(int, printk_pending); -@@ -3070,14 +3078,8 @@ static void 
wake_up_klogd_work_func(struct irq_work *irq_work) +@@ -3123,14 +3112,8 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work) { int pending = __this_cpu_xchg(printk_pending, 0); @@ -18210,14 +18303,12 @@ index ecd28d4fa..e95b00f24 100644 } static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = -@@ -3096,25 +3098,10 @@ void wake_up_klogd(void) +@@ -3149,25 +3132,10 @@ void wake_up_klogd(void) preempt_enable(); } -void defer_console_output(void) -+__printf(1, 0) -+static int vprintk_deferred(const char *fmt, va_list args) - { +-{ - if (!printk_percpu_data_ready()) - return; - @@ -18228,7 +18319,9 @@ index ecd28d4fa..e95b00f24 100644 -} - -int vprintk_deferred(const char *fmt, va_list args) --{ ++__printf(1, 0) ++static int vprintk_deferred(const char *fmt, va_list args) + { - int r; - - r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args); @@ -18239,7 +18332,7 @@ index ecd28d4fa..e95b00f24 100644 } int printk_deferred(const char *fmt, ...) -@@ -3253,8 +3240,26 @@ EXPORT_SYMBOL_GPL(kmsg_dump_reason_str); +@@ -3306,8 +3274,26 @@ EXPORT_SYMBOL_GPL(kmsg_dump_reason_str); */ void kmsg_dump(enum kmsg_dump_reason reason) { @@ -18267,7 +18360,7 @@ index ecd28d4fa..e95b00f24 100644 rcu_read_lock(); list_for_each_entry_rcu(dumper, &dump_list, list) { -@@ -3272,25 +3277,18 @@ void kmsg_dump(enum kmsg_dump_reason reason) +@@ -3325,25 +3311,18 @@ void kmsg_dump(enum kmsg_dump_reason reason) continue; /* initialize iterator with data about the stored records */ @@ -18298,7 +18391,7 @@ index ecd28d4fa..e95b00f24 100644 * @syslog: include the "<4>" prefixes * @line: buffer to copy the line to * @size: maximum size of the buffer -@@ -3304,11 +3302,9 @@ void kmsg_dump(enum kmsg_dump_reason reason) +@@ -3357,11 +3336,9 @@ void kmsg_dump(enum kmsg_dump_reason reason) * * A return value of FALSE indicates that there are no more records to * read. @@ -18312,7 +18405,7 @@ index ecd28d4fa..e95b00f24 100644 { struct printk_info info; unsigned int line_count; -@@ -3318,16 +3314,16 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, +@@ -3371,16 +3348,16 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, prb_rec_init_rd(&r, &info, line, size); @@ -18332,7 +18425,7 @@ index ecd28d4fa..e95b00f24 100644 &info, &line_count)) { goto out; } -@@ -3336,48 +3332,18 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, +@@ -3389,48 +3366,18 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, } @@ -18383,7 +18476,7 @@ index ecd28d4fa..e95b00f24 100644 * @syslog: include the "<4>" prefixes * @buf: buffer to copy the line to * @size: maximum size of the buffer -@@ -3394,116 +3360,256 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line); +@@ -3447,116 +3394,256 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line); * A return value of FALSE indicates that there are no more records to * read. */ @@ -18558,14 +18651,14 @@ index ecd28d4fa..e95b00f24 100644 + * prb_lock: Perform a processor-reentrant spin lock. + * @cpu_lock: A pointer to the lock object. + * @cpu_store: A "flags" pointer to store lock status information. -+ * + * +- * The function is similar to kmsg_dump_rewind(), but grabs no locks. + * If no processor has the lock, the calling processor takes the lock and + * becomes the owner. If the calling processor is already the owner of the + * lock, this function succeeds immediately. If lock is locked by another + * processor, this function spins until the calling processor becomes the + * owner. 
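/*
 * The prb_lock() documented above is processor-reentrant: the lock
 * word records the owning CPU so the same CPU may take it again (for
 * example from an NMI), which is what makes the write_atomic() console
 * path usable in any context. A hedged sketch of the idea only; the
 * real implementation, including its memory barriers, lives in the
 * printk ringbuffer code:
 */
struct example_cpu_lock {
	atomic_t owner;			/* owning CPU, or -1 if unlocked */
};

static void example_cpu_lock(struct example_cpu_lock *l, int *prev)
{
	int cpu = raw_smp_processor_id();

	*prev = atomic_read(&l->owner);
	if (*prev == cpu)
		return;			/* reentrant: already owned here */
	while (atomic_cmpxchg(&l->owner, -1, cpu) != -1)
		cpu_relax();		/* spin on the other CPU's release */
	*prev = -1;
}

static void example_cpu_unlock(struct example_cpu_lock *l, int prev)
{
	/* Only the outermost acquisition on this CPU really unlocks. */
	if (prev == -1)
		atomic_set(&l->owner, -1);
}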
- * -- * The function is similar to kmsg_dump_rewind(), but grabs no locks. ++ * + * It is safe to call this function from any context and state. */ -void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper) @@ -19174,7 +19267,7 @@ index b1d7aef10..5e11e30f4 100644 This option boosts the priority of preempted RCU readers that block the current preemptible RCU grace period for too long. diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c -index e1bbde264..c77b9873c 100644 +index 66bc1af18..76815894b 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -100,8 +100,10 @@ static struct rcu_state rcu_state = { @@ -19190,7 +19283,7 @@ index e1bbde264..c77b9873c 100644 static bool rcu_fanout_exact; module_param(rcu_fanout_exact, bool, 0444); diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c -index 849f0aa99..dd94a602a 100644 +index 0e3821783..2beba0dfd 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -56,8 +56,10 @@ @@ -19206,10 +19299,10 @@ index 849f0aa99..dd94a602a 100644 #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 468427a25..4f7610fd6 100644 +index 6b1d3c850..fc1d7f541 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -65,7 +65,11 @@ const_debug unsigned int sysctl_sched_features = +@@ -66,7 +66,11 @@ const_debug unsigned int sysctl_sched_features = * Number of tasks to iterate in a single balance run. * Limited because this is done with IRQs disabled. */ @@ -19221,7 +19314,7 @@ index 468427a25..4f7610fd6 100644 /* * period over which we measure -rt task CPU usage in us. -@@ -856,9 +860,15 @@ static bool set_nr_if_polling(struct task_struct *p) +@@ -859,9 +863,15 @@ static bool set_nr_if_polling(struct task_struct *p) #endif #endif @@ -19239,7 +19332,7 @@ index 468427a25..4f7610fd6 100644 /* * Atomically grab the task, if ->wake_q is !nil already it means -@@ -894,7 +904,13 @@ static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) +@@ -897,7 +907,13 @@ static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) */ void wake_q_add(struct wake_q_head *head, struct task_struct *task) { @@ -19254,7 +19347,7 @@ index 468427a25..4f7610fd6 100644 get_task_struct(task); } -@@ -917,28 +933,39 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task) +@@ -920,28 +936,39 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task) */ void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task) { @@ -19299,7 +19392,7 @@ index 468427a25..4f7610fd6 100644 put_task_struct(task); } } -@@ -974,6 +1001,48 @@ void resched_curr(struct rq *rq) +@@ -977,6 +1004,48 @@ void resched_curr(struct rq *rq) trace_sched_wake_idle_without_ipi(cpu); } @@ -19348,7 +19441,7 @@ index 468427a25..4f7610fd6 100644 void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); -@@ -2063,6 +2132,82 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) +@@ -2069,6 +2138,82 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) #ifdef CONFIG_SMP @@ -19431,7 +19524,7 @@ index 468427a25..4f7610fd6 100644 /* * Per-CPU kthreads are allowed to run on !active && online CPUs, see * __set_cpus_allowed_ptr() and select_fallback_rq(). 
-@@ -2072,7 +2217,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) +@@ -2078,7 +2223,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) if (!cpumask_test_cpu(cpu, p->cpus_ptr)) return false; @@ -19440,7 +19533,7 @@ index 468427a25..4f7610fd6 100644 return cpu_online(cpu); return cpu_active(cpu); -@@ -2117,8 +2262,21 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, +@@ -2123,8 +2268,21 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, } struct migration_arg { @@ -19464,7 +19557,7 @@ index 468427a25..4f7610fd6 100644 }; /* -@@ -2151,15 +2309,17 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, +@@ -2157,15 +2315,17 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, static int migration_cpu_stop(void *data) { struct migration_arg *arg = data; @@ -19483,7 +19576,7 @@ index 468427a25..4f7610fd6 100644 /* * We need to explicitly wake pending tasks before running * __migrate_task() such that we will not miss enforcing cpus_ptr -@@ -2169,21 +2329,121 @@ static int migration_cpu_stop(void *data) +@@ -2175,21 +2335,121 @@ static int migration_cpu_stop(void *data) raw_spin_lock(&p->pi_lock); rq_lock(rq, &rf); @@ -19608,7 +19701,7 @@ index 468427a25..4f7610fd6 100644 return 0; } -@@ -2191,18 +2451,39 @@ static int migration_cpu_stop(void *data) +@@ -2197,18 +2457,39 @@ static int migration_cpu_stop(void *data) * sched_class::set_cpus_allowed must do the below, but is not required to * actually call this function. */ @@ -19651,7 +19744,7 @@ index 468427a25..4f7610fd6 100644 queued = task_on_rq_queued(p); running = task_current(rq, p); -@@ -2218,7 +2499,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +@@ -2224,7 +2505,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) if (running) put_prev_task(rq, p); @@ -19660,7 +19753,7 @@ index 468427a25..4f7610fd6 100644 if (queued) enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); -@@ -2226,6 +2507,222 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +@@ -2232,6 +2513,222 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) set_next_task(rq, p); } @@ -19883,7 +19976,7 @@ index 468427a25..4f7610fd6 100644 /* * Change a given task's CPU affinity. Migrate the thread to a * proper CPU and schedule it away if the CPU it's executing on -@@ -2236,7 +2733,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +@@ -2242,7 +2739,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) * call is not atomic; no spinlocks may be held. */ static int __set_cpus_allowed_ptr(struct task_struct *p, @@ -19893,7 +19986,7 @@ index 468427a25..4f7610fd6 100644 { const struct cpumask *cpu_valid_mask = cpu_active_mask; unsigned int dest_cpu; -@@ -2247,9 +2745,14 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2253,9 +2751,14 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, rq = task_rq_lock(p, &rf); update_rq_clock(rq); @@ -19910,7 +20003,7 @@ index 468427a25..4f7610fd6 100644 */ cpu_valid_mask = cpu_online_mask; } -@@ -2258,13 +2761,22 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2264,13 +2767,22 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, * Must re-check here, to close a race against __kthread_bind(), * sched_setaffinity() is not guaranteed to observe the flag. 
*/ @@ -19936,7 +20029,7 @@ index 468427a25..4f7610fd6 100644 /* * Picking a ~random cpu helps in cases where we are changing affinity -@@ -2277,7 +2789,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2283,7 +2795,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, goto out; } @@ -19945,7 +20038,7 @@ index 468427a25..4f7610fd6 100644 if (p->flags & PF_KTHREAD) { /* -@@ -2289,23 +2801,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2295,23 +2807,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, p->nr_cpus_allowed != 1); } @@ -19970,7 +20063,7 @@ index 468427a25..4f7610fd6 100644 out: task_rq_unlock(rq, p, &rf); -@@ -2314,7 +2811,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -2320,7 +2817,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) { @@ -19979,7 +20072,7 @@ index 468427a25..4f7610fd6 100644 } EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); -@@ -2355,6 +2852,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) +@@ -2361,6 +2858,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) * Clearly, migrating tasks to offline CPUs is a fairly daft thing. */ WARN_ON_ONCE(!cpu_online(new_cpu)); @@ -19988,7 +20081,7 @@ index 468427a25..4f7610fd6 100644 #endif trace_sched_migrate_task(p, new_cpu); -@@ -2487,6 +2986,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, +@@ -2493,6 +2992,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, } #endif /* CONFIG_NUMA_BALANCING */ @@ -20007,7 +20100,7 @@ index 468427a25..4f7610fd6 100644 /* * wait_task_inactive - wait for a thread to unschedule. * -@@ -2531,7 +3042,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) +@@ -2537,7 +3048,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) * is actually now running somewhere else! 
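/*
 * The match_state test in wait_task_inactive() just below is widened
 * to also accept p->saved_state: on PREEMPT_RT a task blocking on a
 * sleeping spinlock parks its real sleep state there. Sketch of the
 * combined check, assuming it runs under the locks the patched
 * function already holds:
 */
static bool example_state_matches(struct task_struct *p, long match_state)
{
	if (READ_ONCE(p->state) == match_state)
		return true;
	/* RT: the "real" state was saved when blocking on a spinlock_t. */
	return READ_ONCE(p->saved_state) == match_state;
}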
*/ while (task_running(rq, p)) { @@ -20016,7 +20109,7 @@ index 468427a25..4f7610fd6 100644 return 0; cpu_relax(); } -@@ -2546,7 +3057,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) +@@ -2552,7 +3063,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) running = task_running(rq, p); queued = task_on_rq_queued(p); ncsw = 0; @@ -20026,7 +20119,7 @@ index 468427a25..4f7610fd6 100644 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ task_rq_unlock(rq, p, &rf); -@@ -2580,7 +3092,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) +@@ -2586,7 +3098,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) ktime_t to = NSEC_PER_SEC / HZ; set_current_state(TASK_UNINTERRUPTIBLE); @@ -20035,7 +20128,7 @@ index 468427a25..4f7610fd6 100644 continue; } -@@ -2685,6 +3197,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p) +@@ -2691,6 +3203,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p) } fallthrough; case possible: @@ -20048,7 +20141,7 @@ index 468427a25..4f7610fd6 100644 do_set_cpus_allowed(p, cpu_possible_mask); state = fail; break; -@@ -2719,7 +3237,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) +@@ -2725,7 +3243,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) { lockdep_assert_held(&p->pi_lock); @@ -20057,7 +20150,7 @@ index 468427a25..4f7610fd6 100644 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); else cpu = cpumask_any(p->cpus_ptr); -@@ -2742,6 +3260,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) +@@ -2748,6 +3266,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) void sched_set_stop_task(int cpu, struct task_struct *stop) { @@ -20065,7 +20158,7 @@ index 468427a25..4f7610fd6 100644 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; struct task_struct *old_stop = cpu_rq(cpu)->stop; -@@ -2757,6 +3276,20 @@ void sched_set_stop_task(int cpu, struct task_struct *stop) +@@ -2763,6 +3282,20 @@ void sched_set_stop_task(int cpu, struct task_struct *stop) sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); stop->sched_class = &stop_sched_class; @@ -20086,7 +20179,7 @@ index 468427a25..4f7610fd6 100644 } cpu_rq(cpu)->stop = stop; -@@ -2770,15 +3303,23 @@ void sched_set_stop_task(int cpu, struct task_struct *stop) +@@ -2776,15 +3309,23 @@ void sched_set_stop_task(int cpu, struct task_struct *stop) } } @@ -20113,7 +20206,7 @@ index 468427a25..4f7610fd6 100644 static void ttwu_stat(struct task_struct *p, int cpu, int wake_flags) -@@ -3220,7 +3761,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) +@@ -3226,7 +3767,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) int cpu, success = 0; preempt_disable(); @@ -20122,7 +20215,7 @@ index 468427a25..4f7610fd6 100644 /* * We're waking current, this means 'p->on_rq' and 'task_cpu(p) * == smp_processor_id()'. 
Together this means we can special -@@ -3250,8 +3791,26 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) +@@ -3256,8 +3797,26 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) */ raw_spin_lock_irqsave(&p->pi_lock, flags); smp_mb__after_spinlock(); @@ -20150,7 +20243,7 @@ index 468427a25..4f7610fd6 100644 trace_sched_waking(p); -@@ -3440,6 +3999,18 @@ int wake_up_process(struct task_struct *p) +@@ -3446,6 +4005,18 @@ int wake_up_process(struct task_struct *p) } EXPORT_SYMBOL(wake_up_process); @@ -20169,7 +20262,7 @@ index 468427a25..4f7610fd6 100644 int wake_up_state(struct task_struct *p, unsigned int state) { return try_to_wake_up(p, state, 0); -@@ -3493,6 +4064,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) +@@ -3499,6 +4070,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) init_numa_balancing(clone_flags, p); #ifdef CONFIG_SMP p->wake_entry.u_flags = CSD_TYPE_TTWU; @@ -20177,7 +20270,7 @@ index 468427a25..4f7610fd6 100644 #endif #ifdef CONFIG_BPF_SCHED p->tag = 0; -@@ -3712,6 +4284,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) +@@ -3718,6 +4290,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) p->on_cpu = 0; #endif init_task_preempt_count(p); @@ -20187,7 +20280,7 @@ index 468427a25..4f7610fd6 100644 #ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); RB_CLEAR_NODE(&p->pushable_dl_tasks); -@@ -3882,60 +4457,145 @@ __fire_sched_out_preempt_notifiers(struct task_struct *curr, +@@ -3888,60 +4463,145 @@ __fire_sched_out_preempt_notifiers(struct task_struct *curr, notifier->ops->sched_out(notifier, next); } @@ -20371,7 +20464,7 @@ index 468427a25..4f7610fd6 100644 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) { -@@ -3976,6 +4636,22 @@ static inline void finish_lock_switch(struct rq *rq) +@@ -3982,6 +4642,22 @@ static inline void finish_lock_switch(struct rq *rq) # define finish_arch_post_lock_switch() do { } while (0) #endif @@ -20394,7 +20487,7 @@ index 468427a25..4f7610fd6 100644 /** * prepare_task_switch - prepare to switch tasks * @rq: the runqueue preparing to switch -@@ -3998,6 +4674,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, +@@ -4004,6 +4680,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, perf_event_task_sched_out(prev, next); rseq_preempt(prev); fire_sched_out_preempt_notifiers(prev, next); @@ -20402,7 +20495,7 @@ index 468427a25..4f7610fd6 100644 prepare_task(next); prepare_arch_switch(next); } -@@ -4065,6 +4742,7 @@ static struct rq *finish_task_switch(struct task_struct *prev) +@@ -4071,6 +4748,7 @@ static struct rq *finish_task_switch(struct task_struct *prev) finish_lock_switch(rq); finish_arch_post_lock_switch(); kcov_finish_switch(current); @@ -20410,7 +20503,7 @@ index 468427a25..4f7610fd6 100644 fire_sched_in_preempt_notifiers(current); /* -@@ -4081,63 +4759,19 @@ static struct rq *finish_task_switch(struct task_struct *prev) +@@ -4087,63 +4765,19 @@ static struct rq *finish_task_switch(struct task_struct *prev) */ if (mm) { membarrier_mm_sync_core_before_usermode(mm); @@ -20475,7 +20568,7 @@ index 468427a25..4f7610fd6 100644 /** * schedule_tail - first thing a freshly forked thread must call. 
-@@ -4158,7 +4792,6 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev) +@@ -4164,7 +4798,6 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev) */ rq = finish_task_switch(prev); @@ -20483,7 +20576,7 @@ index 468427a25..4f7610fd6 100644 preempt_enable(); if (current->set_child_tid) -@@ -5314,7 +5947,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) +@@ -5325,7 +5958,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) * * WARNING: must be called with preemption disabled! */ @@ -20492,7 +20585,7 @@ index 468427a25..4f7610fd6 100644 { struct task_struct *prev, *next; unsigned long *switch_count; -@@ -5367,7 +6000,7 @@ static void __sched notrace __schedule(bool preempt) +@@ -5378,7 +6011,7 @@ static void __sched notrace __schedule(bool preempt) * - ptrace_{,un}freeze_traced() can change ->state underneath us. */ prev_state = prev->state; @@ -20501,7 +20594,7 @@ index 468427a25..4f7610fd6 100644 if (signal_pending_state(prev_state, prev)) { prev->state = TASK_RUNNING; } else { -@@ -5402,6 +6035,7 @@ static void __sched notrace __schedule(bool preempt) +@@ -5413,6 +6046,7 @@ static void __sched notrace __schedule(bool preempt) next = pick_next_task(rq, prev, &rf); clear_tsk_need_resched(prev); @@ -20509,7 +20602,7 @@ index 468427a25..4f7610fd6 100644 clear_preempt_need_resched(); if (likely(prev != next)) { -@@ -5427,6 +6061,7 @@ static void __sched notrace __schedule(bool preempt) +@@ -5438,6 +6072,7 @@ static void __sched notrace __schedule(bool preempt) */ ++*switch_count; @@ -20517,7 +20610,7 @@ index 468427a25..4f7610fd6 100644 psi_sched_switch(prev, next, !task_on_rq_queued(prev)); trace_sched_switch(preempt, prev, next); -@@ -5435,10 +6070,11 @@ static void __sched notrace __schedule(bool preempt) +@@ -5446,10 +6081,11 @@ static void __sched notrace __schedule(bool preempt) rq = context_switch(rq, prev, next, &rf); } else { rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); @@ -20532,7 +20625,7 @@ index 468427a25..4f7610fd6 100644 } void __noreturn do_task_dead(void) -@@ -5449,7 +6085,7 @@ void __noreturn do_task_dead(void) +@@ -5460,7 +6096,7 @@ void __noreturn do_task_dead(void) /* Tell freezer to ignore us: */ current->flags |= PF_NOFREEZE; @@ -20541,7 +20634,7 @@ index 468427a25..4f7610fd6 100644 BUG(); /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ -@@ -5482,9 +6118,6 @@ static inline void sched_submit_work(struct task_struct *tsk) +@@ -5493,9 +6129,6 @@ static inline void sched_submit_work(struct task_struct *tsk) preempt_enable_no_resched(); } @@ -20551,7 +20644,7 @@ index 468427a25..4f7610fd6 100644 /* * If we are going to sleep and we have plugged IO queued, * make sure to submit it to avoid deadlocks. 
-@@ -5510,7 +6143,7 @@ asmlinkage __visible void __sched schedule(void) +@@ -5521,7 +6154,7 @@ asmlinkage __visible void __sched schedule(void) sched_submit_work(tsk); do { preempt_disable(); @@ -20560,7 +20653,7 @@ index 468427a25..4f7610fd6 100644 sched_preempt_enable_no_resched(); } while (need_resched()); sched_update_worker(tsk); -@@ -5538,7 +6171,7 @@ void __sched schedule_idle(void) +@@ -5549,7 +6182,7 @@ void __sched schedule_idle(void) */ WARN_ON_ONCE(current->state); do { @@ -20569,7 +20662,7 @@ index 468427a25..4f7610fd6 100644 } while (need_resched()); } -@@ -5591,7 +6224,7 @@ static void __sched notrace preempt_schedule_common(void) +@@ -5602,7 +6235,7 @@ static void __sched notrace preempt_schedule_common(void) */ preempt_disable_notrace(); preempt_latency_start(1); @@ -20578,7 +20671,7 @@ index 468427a25..4f7610fd6 100644 preempt_latency_stop(1); preempt_enable_no_resched_notrace(); -@@ -5602,6 +6235,30 @@ static void __sched notrace preempt_schedule_common(void) +@@ -5613,6 +6246,30 @@ static void __sched notrace preempt_schedule_common(void) } while (need_resched()); } @@ -20609,7 +20702,7 @@ index 468427a25..4f7610fd6 100644 #ifdef CONFIG_PREEMPTION /* * This is the entry point to schedule() from in-kernel preemption -@@ -5615,12 +6272,26 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) +@@ -5626,12 +6283,26 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) */ if (likely(!preemptible())) return; @@ -20637,7 +20730,7 @@ index 468427a25..4f7610fd6 100644 #ifdef CONFIG_PREEMPT_DYNAMIC DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func); EXPORT_STATIC_CALL(preempt_schedule); -@@ -5648,6 +6319,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) +@@ -5659,6 +6330,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) if (likely(!preemptible())) return; @@ -20647,7 +20740,7 @@ index 468427a25..4f7610fd6 100644 do { /* * Because the function tracer can trace preempt_count_sub() -@@ -5670,7 +6344,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) +@@ -5681,7 +6355,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) * an infinite recursion. 
*/ prev_ctx = exception_enter(); @@ -20656,7 +20749,7 @@ index 468427a25..4f7610fd6 100644 exception_exit(prev_ctx); preempt_latency_stop(1); -@@ -5888,7 +6562,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void) +@@ -5899,7 +6573,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void) do { preempt_disable(); local_irq_enable(); @@ -20665,7 +20758,7 @@ index 468427a25..4f7610fd6 100644 local_irq_disable(); sched_preempt_enable_no_resched(); } while (need_resched()); -@@ -6054,9 +6728,11 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) +@@ -6065,9 +6739,11 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) out_unlock: /* Avoid rq from going away on us: */ preempt_disable(); @@ -20679,7 +20772,7 @@ index 468427a25..4f7610fd6 100644 preempt_enable(); } #else -@@ -6299,6 +6975,7 @@ static int __sched_setscheduler(struct task_struct *p, +@@ -6310,6 +6986,7 @@ static int __sched_setscheduler(struct task_struct *p, int oldpolicy = -1, policy = attr->sched_policy; int retval, oldprio, newprio, queued, running; const struct sched_class *prev_class; @@ -20687,7 +20780,7 @@ index 468427a25..4f7610fd6 100644 struct rq_flags rf; int reset_on_fork; int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; -@@ -6553,6 +7230,7 @@ static int __sched_setscheduler(struct task_struct *p, +@@ -6564,6 +7241,7 @@ static int __sched_setscheduler(struct task_struct *p, /* Avoid rq from going away on us: */ preempt_disable(); @@ -20695,7 +20788,7 @@ index 468427a25..4f7610fd6 100644 task_rq_unlock(rq, p, &rf); if (pi) { -@@ -6561,7 +7239,7 @@ static int __sched_setscheduler(struct task_struct *p, +@@ -6572,7 +7250,7 @@ static int __sched_setscheduler(struct task_struct *p, } /* Run balance callbacks after we've adjusted the PI chain: */ @@ -20704,7 +20797,7 @@ index 468427a25..4f7610fd6 100644 preempt_enable(); return 0; -@@ -7056,7 +7734,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) +@@ -7067,7 +7745,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) } #endif again: @@ -20713,7 +20806,7 @@ index 468427a25..4f7610fd6 100644 if (!retval) { cpuset_cpus_allowed(p, cpus_allowed); -@@ -7642,7 +8320,7 @@ void __init init_idle(struct task_struct *idle, int cpu) +@@ -7693,7 +8371,7 @@ void __init init_idle(struct task_struct *idle, int cpu) * * And since this is boot we can forgo the serialization. */ @@ -20722,7 +20815,7 @@ index 468427a25..4f7610fd6 100644 #endif /* * We're having a chicken and egg problem, even though we are -@@ -7669,7 +8347,9 @@ void __init init_idle(struct task_struct *idle, int cpu) +@@ -7720,7 +8398,9 @@ void __init init_idle(struct task_struct *idle, int cpu) /* Set the preempt count _outside_ the spinlocks! */ init_idle_preempt_count(idle, cpu); @@ -20733,7 +20826,7 @@ index 468427a25..4f7610fd6 100644 /* * The idle tasks have their own, simple scheduling class: */ -@@ -7779,6 +8459,7 @@ void sched_setnuma(struct task_struct *p, int nid) +@@ -7830,6 +8510,7 @@ void sched_setnuma(struct task_struct *p, int nid) #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_HOTPLUG_CPU @@ -20741,7 +20834,7 @@ index 468427a25..4f7610fd6 100644 /* * Ensure that the idle task is using init_mm right before its CPU goes * offline. 
-@@ -7798,119 +8479,126 @@ void idle_task_exit(void) +@@ -7849,119 +8530,126 @@ void idle_task_exit(void) /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ } @@ -20958,7 +21051,7 @@ index 468427a25..4f7610fd6 100644 #endif /* CONFIG_HOTPLUG_CPU */ void set_rq_online(struct rq *rq) -@@ -7998,6 +8686,8 @@ int sched_cpu_activate(unsigned int cpu) +@@ -8049,6 +8737,8 @@ int sched_cpu_activate(unsigned int cpu) struct rq *rq = cpu_rq(cpu); struct rq_flags rf; @@ -20967,7 +21060,7 @@ index 468427a25..4f7610fd6 100644 #ifdef CONFIG_SCHED_SMT /* * When going up, increment the number of cores with SMT present. -@@ -8033,6 +8723,8 @@ int sched_cpu_activate(unsigned int cpu) +@@ -8085,6 +8775,8 @@ int sched_cpu_activate(unsigned int cpu) int sched_cpu_deactivate(unsigned int cpu) { @@ -20976,7 +21069,7 @@ index 468427a25..4f7610fd6 100644 int ret; set_cpu_active(cpu, false); -@@ -8045,6 +8737,16 @@ int sched_cpu_deactivate(unsigned int cpu) +@@ -8097,6 +8789,16 @@ int sched_cpu_deactivate(unsigned int cpu) */ synchronize_rcu(); @@ -20993,7 +21086,7 @@ index 468427a25..4f7610fd6 100644 #ifdef CONFIG_SCHED_SMT /* * When going down, decrement the number of cores with SMT present. -@@ -8088,6 +8790,41 @@ int sched_cpu_starting(unsigned int cpu) +@@ -8141,6 +8843,41 @@ int sched_cpu_starting(unsigned int cpu) } #ifdef CONFIG_HOTPLUG_CPU @@ -21035,7 +21128,7 @@ index 468427a25..4f7610fd6 100644 int sched_cpu_dying(unsigned int cpu) { struct rq *rq = cpu_rq(cpu); -@@ -8097,12 +8834,7 @@ int sched_cpu_dying(unsigned int cpu) +@@ -8150,12 +8887,7 @@ int sched_cpu_dying(unsigned int cpu) sched_tick_stop(cpu); rq_lock_irqsave(rq, &rf); @@ -21049,7 +21142,7 @@ index 468427a25..4f7610fd6 100644 rq_unlock_irqrestore(rq, &rf); calc_load_migrate(rq); -@@ -8320,6 +9052,9 @@ void __init sched_init(void) +@@ -8376,6 +9108,9 @@ void __init sched_init(void) INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); #endif @@ -21059,7 +21152,7 @@ index 468427a25..4f7610fd6 100644 #endif /* CONFIG_SMP */ hrtick_rq_init(rq); atomic_set(&rq->nr_iowait, 0); -@@ -8370,7 +9105,7 @@ void __init sched_init(void) +@@ -8426,7 +9161,7 @@ void __init sched_init(void) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline int preempt_count_equals(int preempt_offset) { @@ -21068,7 +21161,7 @@ index 468427a25..4f7610fd6 100644 return (nested == preempt_offset); } -@@ -8467,6 +9202,39 @@ void __cant_sleep(const char *file, int line, int preempt_offset) +@@ -8523,6 +9258,39 @@ void __cant_sleep(const char *file, int line, int preempt_offset) add_taint(TAINT_WARN, LOCKDEP_STILL_OK); } EXPORT_SYMBOL_GPL(__cant_sleep); @@ -21228,7 +21321,7 @@ index ca0eef7d3..02a5aa60f 100644 void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, u64 *ut, u64 *st) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c -index c4c0d760d..14252d5be 100644 +index d8aea5947..3382ed7fc 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -551,7 +551,7 @@ static int push_dl_task(struct rq *rq); @@ -21240,7 +21333,7 @@ index c4c0d760d..14252d5be 100644 } static DEFINE_PER_CPU(struct callback_head, dl_push_head); -@@ -1913,7 +1913,7 @@ static void task_fork_dl(struct task_struct *p) +@@ -1912,7 +1912,7 @@ static void task_fork_dl(struct task_struct *p) static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && @@ -21249,7 +21342,7 @@ index c4c0d760d..14252d5be 100644 return 1; return 0; } -@@ -2003,8 +2003,8 @@ static int find_later_rq(struct task_struct *task) +@@ -2002,8 +2002,8 @@ 
static int find_later_rq(struct task_struct *task) return this_cpu; } @@ -21260,7 +21353,7 @@ index c4c0d760d..14252d5be 100644 /* * Last chance: if a CPU being in both later_mask * and current sd span is valid, that becomes our -@@ -2026,7 +2026,7 @@ static int find_later_rq(struct task_struct *task) +@@ -2025,7 +2025,7 @@ static int find_later_rq(struct task_struct *task) if (this_cpu != -1) return this_cpu; @@ -21269,7 +21362,7 @@ index c4c0d760d..14252d5be 100644 if (cpu < nr_cpu_ids) return cpu; -@@ -2091,7 +2091,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) +@@ -2090,7 +2090,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) */ next_task = pick_next_pushable_dl_task(rq); if (unlikely(next_task != task || @@ -21278,7 +21371,7 @@ index c4c0d760d..14252d5be 100644 double_unlock_balance(rq, later_rq); later_rq = NULL; break; -@@ -2135,6 +2135,9 @@ static int push_dl_task(struct rq *rq) +@@ -2134,6 +2134,9 @@ static int push_dl_task(struct rq *rq) return 0; retry: @@ -21288,7 +21381,7 @@ index c4c0d760d..14252d5be 100644 if (WARN_ON(next_task == rq->curr)) return 0; -@@ -2212,7 +2215,7 @@ static void push_dl_tasks(struct rq *rq) +@@ -2211,7 +2214,7 @@ static void push_dl_tasks(struct rq *rq) static void pull_dl_task(struct rq *this_rq) { int this_cpu = this_rq->cpu, cpu; @@ -21297,7 +21390,7 @@ index c4c0d760d..14252d5be 100644 bool resched = false; struct rq *src_rq; u64 dmin = LONG_MAX; -@@ -2242,6 +2245,7 @@ static void pull_dl_task(struct rq *this_rq) +@@ -2241,6 +2244,7 @@ static void pull_dl_task(struct rq *this_rq) continue; /* Might drop this_rq->lock */ @@ -21305,7 +21398,7 @@ index c4c0d760d..14252d5be 100644 double_lock_balance(this_rq, src_rq); /* -@@ -2273,17 +2277,28 @@ static void pull_dl_task(struct rq *this_rq) +@@ -2272,17 +2276,28 @@ static void pull_dl_task(struct rq *this_rq) src_rq->curr->dl.deadline)) goto skip; @@ -21340,7 +21433,7 @@ index c4c0d760d..14252d5be 100644 } if (resched) -@@ -2307,7 +2322,8 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p) +@@ -2306,7 +2321,8 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p) } static void set_cpus_allowed_dl(struct task_struct *p, @@ -21350,7 +21443,7 @@ index c4c0d760d..14252d5be 100644 { struct root_domain *src_rd; struct rq *rq; -@@ -2336,7 +2352,7 @@ static void set_cpus_allowed_dl(struct task_struct *p, +@@ -2335,7 +2351,7 @@ static void set_cpus_allowed_dl(struct task_struct *p, raw_spin_unlock(&src_dl_b->lock); } @@ -21359,7 +21452,7 @@ index c4c0d760d..14252d5be 100644 } /* Assumes rq->lock is held */ -@@ -2532,6 +2548,7 @@ const struct sched_class dl_sched_class +@@ -2531,6 +2547,7 @@ const struct sched_class dl_sched_class .rq_online = rq_online_dl, .rq_offline = rq_offline_dl, .task_woken = task_woken_dl, @@ -21368,10 +21461,10 @@ index c4c0d760d..14252d5be 100644 .task_tick = task_tick_dl, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index b8bf7acb9..1dcf17497 100644 +index ce8e75e18..b06e1efb9 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4673,7 +4673,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) +@@ -4717,7 +4717,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) #endif if (delta_exec > ideal_runtime) { @@ -21380,7 +21473,7 @@ index b8bf7acb9..1dcf17497 100644 /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. 
-@@ -4697,7 +4697,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) +@@ -4741,7 +4741,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) return; if (delta > ideal_runtime) @@ -21389,7 +21482,7 @@ index b8bf7acb9..1dcf17497 100644 } static void -@@ -4840,7 +4840,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) +@@ -4884,7 +4884,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) * validating it and just reschedule. */ if (queued) { @@ -21398,7 +21491,7 @@ index b8bf7acb9..1dcf17497 100644 return; } /* -@@ -4989,7 +4989,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) +@@ -5033,7 +5033,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) @@ -21407,7 +21500,7 @@ index b8bf7acb9..1dcf17497 100644 } static __always_inline -@@ -5783,7 +5783,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) +@@ -6281,7 +6281,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) if (delta < 0) { if (rq->curr == p) @@ -21416,7 +21509,7 @@ index b8bf7acb9..1dcf17497 100644 return; } hrtick_start(rq, delta); -@@ -7705,7 +7705,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ +@@ -8267,7 +8267,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ return; preempt: @@ -21425,7 +21518,7 @@ index b8bf7acb9..1dcf17497 100644 /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved -@@ -12578,7 +12578,7 @@ static void task_fork_fair(struct task_struct *p) +@@ -13158,7 +13158,7 @@ static void task_fork_fair(struct task_struct *p) * 'current' within the tree based on its new key value. */ swap(curr->vruntime, se->vruntime); @@ -21434,7 +21527,7 @@ index b8bf7acb9..1dcf17497 100644 } se->vruntime -= cfs_rq->min_vruntime; -@@ -12605,7 +12605,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) +@@ -13185,7 +13185,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) */ if (rq->curr == p) { if (p->prio > oldprio) @@ -21468,7 +21561,7 @@ index 76fade025..0a20427ef 100644 /* * When doing wakeups, attempt to limit superfluous scans of the LLC domain. 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c -index ca868c04f..9798149b5 100644 +index 52062b910..7488bcf38 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -270,7 +270,7 @@ static void pull_rt_task(struct rq *this_rq); @@ -21480,7 +21573,7 @@ index ca868c04f..9798149b5 100644 } static inline int rt_overloaded(struct rq *rq) -@@ -1679,7 +1679,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) +@@ -1681,7 +1681,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && @@ -21489,7 +21582,7 @@ index ca868c04f..9798149b5 100644 return 1; return 0; -@@ -1773,8 +1773,8 @@ static int find_lowest_rq(struct task_struct *task) +@@ -1775,8 +1775,8 @@ static int find_lowest_rq(struct task_struct *task) return this_cpu; } @@ -21500,7 +21593,7 @@ index ca868c04f..9798149b5 100644 if (best_cpu < nr_cpu_ids) { rcu_read_unlock(); return best_cpu; -@@ -1791,7 +1791,7 @@ static int find_lowest_rq(struct task_struct *task) +@@ -1793,7 +1793,7 @@ static int find_lowest_rq(struct task_struct *task) if (this_cpu != -1) return this_cpu; @@ -21509,7 +21602,7 @@ index ca868c04f..9798149b5 100644 if (cpu < nr_cpu_ids) return cpu; -@@ -1852,7 +1852,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) +@@ -1854,7 +1854,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) */ struct task_struct *next_task = pick_next_pushable_task(rq); if (unlikely(next_task != task || @@ -21518,7 +21611,7 @@ index ca868c04f..9798149b5 100644 double_unlock_balance(rq, lowest_rq); lowest_rq = NULL; break; -@@ -1876,7 +1876,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) +@@ -1878,7 +1878,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) * running task can migrate over to a CPU that is running a task * of lesser priority. 
*/ @@ -21527,7 +21620,7 @@ index ca868c04f..9798149b5 100644 { struct task_struct *next_task; struct rq *lowest_rq; -@@ -1890,6 +1890,39 @@ static int push_rt_task(struct rq *rq) +@@ -1892,6 +1892,39 @@ static int push_rt_task(struct rq *rq) return 0; retry: @@ -21567,7 +21660,7 @@ index ca868c04f..9798149b5 100644 if (WARN_ON(next_task == rq->curr)) return 0; -@@ -1947,12 +1980,10 @@ static int push_rt_task(struct rq *rq) +@@ -1949,12 +1982,10 @@ static int push_rt_task(struct rq *rq) deactivate_task(rq, next_task, 0); set_task_cpu(next_task, lowest_rq->cpu); activate_task(lowest_rq, next_task, 0); @@ -21581,7 +21674,7 @@ index ca868c04f..9798149b5 100644 out: put_task_struct(next_task); -@@ -1962,7 +1993,7 @@ static int push_rt_task(struct rq *rq) +@@ -1964,7 +1995,7 @@ static int push_rt_task(struct rq *rq) static void push_rt_tasks(struct rq *rq) { /* push_rt_task will return true if it moved an RT */ @@ -21590,7 +21683,7 @@ index ca868c04f..9798149b5 100644 ; } -@@ -2115,7 +2146,10 @@ void rto_push_irq_work_func(struct irq_work *work) +@@ -2117,7 +2148,10 @@ void rto_push_irq_work_func(struct irq_work *work) */ if (has_pushable_tasks(rq)) { raw_spin_rq_lock(rq); @@ -21602,7 +21695,7 @@ index ca868c04f..9798149b5 100644 raw_spin_rq_unlock(rq); } -@@ -2140,7 +2174,7 @@ static void pull_rt_task(struct rq *this_rq) +@@ -2142,7 +2176,7 @@ static void pull_rt_task(struct rq *this_rq) { int this_cpu = this_rq->cpu, cpu; bool resched = false; @@ -21611,7 +21704,7 @@ index ca868c04f..9798149b5 100644 struct rq *src_rq; int rt_overload_count = rt_overloaded(this_rq); -@@ -2187,6 +2221,7 @@ static void pull_rt_task(struct rq *this_rq) +@@ -2189,6 +2223,7 @@ static void pull_rt_task(struct rq *this_rq) * double_lock_balance, and another CPU could * alter this_rq */ @@ -21619,7 +21712,7 @@ index ca868c04f..9798149b5 100644 double_lock_balance(this_rq, src_rq); /* -@@ -2214,11 +2249,15 @@ static void pull_rt_task(struct rq *this_rq) +@@ -2216,11 +2251,15 @@ static void pull_rt_task(struct rq *this_rq) if (p->prio < src_rq->curr->prio) goto skip; @@ -21640,7 +21733,7 @@ index ca868c04f..9798149b5 100644 /* * We continue with the search, just in * case there's an even higher prio task -@@ -2228,6 +2267,13 @@ static void pull_rt_task(struct rq *this_rq) +@@ -2230,6 +2269,13 @@ static void pull_rt_task(struct rq *this_rq) } skip: double_unlock_balance(this_rq, src_rq); @@ -21654,7 +21747,7 @@ index ca868c04f..9798149b5 100644 } if (resched) -@@ -2477,6 +2523,7 @@ const struct sched_class rt_sched_class +@@ -2479,6 +2525,7 @@ const struct sched_class rt_sched_class .rq_offline = rq_offline_rt, .task_woken = task_woken_rt, .switched_from = switched_from_rt, @@ -21663,10 +21756,10 @@ index ca868c04f..9798149b5 100644 .task_tick = task_tick_rt, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index 0f871e9b1..9195d7dff 100644 +index 18c1b3d5a..27ecbde30 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h -@@ -1043,6 +1043,7 @@ struct rq { +@@ -1095,6 +1095,7 @@ struct rq { unsigned long cpu_capacity_orig; struct callback_head *balance_callback; @@ -21674,7 +21767,7 @@ index 0f871e9b1..9195d7dff 100644 unsigned char nohz_idle_balance; unsigned char idle_balance; -@@ -1073,6 +1074,10 @@ struct rq { +@@ -1125,6 +1126,10 @@ struct rq { /* This is used to determine avg_idle's max value */ u64 max_idle_balance_cost; @@ -21685,7 +21778,7 @@ index 0f871e9b1..9195d7dff 100644 #endif /* CONFIG_SMP */ #ifdef CONFIG_IRQ_TIME_ACCOUNTING -@@ -1146,6 +1151,11 @@ struct rq { +@@ -1198,6 +1203,11 @@ 
struct rq { unsigned char core_forceidle; unsigned int core_forceidle_seq; #endif @@ -21697,7 +21790,7 @@ index 0f871e9b1..9195d7dff 100644 #if defined(CONFIG_QOS_SCHED_PRIO_LB) && !defined(__GENKSYMS__) struct list_head cfs_offline_tasks; -@@ -1185,6 +1195,16 @@ static inline int cpu_of(struct rq *rq) +@@ -1237,6 +1247,16 @@ static inline int cpu_of(struct rq *rq) return 0; #endif } @@ -21714,7 +21807,7 @@ index 0f871e9b1..9195d7dff 100644 #ifdef CONFIG_QOS_SCHED #ifdef CONFIG_QOS_SCHED_MULTILEVEL -@@ -1602,6 +1622,9 @@ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) +@@ -1654,6 +1674,9 @@ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); rf->clock_update_flags = 0; #endif @@ -21724,7 +21817,7 @@ index 0f871e9b1..9195d7dff 100644 } static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) -@@ -1773,6 +1796,9 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p) +@@ -1825,6 +1848,9 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p) #ifdef CONFIG_SMP @@ -21734,7 +21827,7 @@ index 0f871e9b1..9195d7dff 100644 static inline void queue_balance_callback(struct rq *rq, struct callback_head *head, -@@ -1780,12 +1806,13 @@ queue_balance_callback(struct rq *rq, +@@ -1832,12 +1858,13 @@ queue_balance_callback(struct rq *rq, { lockdep_assert_rq_held(rq); @@ -21749,7 +21842,7 @@ index 0f871e9b1..9195d7dff 100644 } #define rcu_dereference_check_sched_domain(p) \ -@@ -2112,7 +2139,7 @@ static inline int task_on_rq_migrating(struct task_struct *p) +@@ -2163,7 +2190,7 @@ static inline int task_on_rq_migrating(struct task_struct *p) #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ #define WF_FORK 0x02 /* Child wakeup after fork */ #define WF_MIGRATED 0x04 /* Internal use, task got migrated */ @@ -21758,7 +21851,7 @@ index 0f871e9b1..9195d7dff 100644 /* * To aid in avoiding the subversion of "niceness" due to uneven distribution * of tasks with abnormal "nice" values across CPUs the contribution that -@@ -2193,10 +2220,13 @@ struct sched_class { +@@ -2244,10 +2271,13 @@ struct sched_class { void (*task_woken)(struct rq *this_rq, struct task_struct *task); void (*set_cpus_allowed)(struct task_struct *p, @@ -21773,7 +21866,7 @@ index 0f871e9b1..9195d7dff 100644 #endif void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); -@@ -2286,13 +2316,38 @@ static inline bool sched_fair_runnable(struct rq *rq) +@@ -2337,13 +2367,38 @@ static inline bool sched_fair_runnable(struct rq *rq) extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); extern struct task_struct *pick_next_task_idle(struct rq *rq); @@ -21813,7 +21906,7 @@ index 0f871e9b1..9195d7dff 100644 #endif -@@ -2336,6 +2391,15 @@ extern void reweight_task(struct task_struct *p, int prio); +@@ -2387,6 +2442,15 @@ extern void reweight_task(struct task_struct *p, int prio); extern void resched_curr(struct rq *rq); extern void resched_cpu(int cpu); @@ -22036,10 +22129,10 @@ index eed7a3a38..9769b462e 100644 cgroup_leave_frozen(true); } else { diff --git a/kernel/smp.c b/kernel/smp.c -index 114776d0d..6d35929a1 100644 +index 2023c022a..d763c2daa 100644 --- a/kernel/smp.c +++ b/kernel/smp.c -@@ -480,8 +480,18 @@ void flush_smp_call_function_from_idle(void) +@@ -487,8 +487,18 @@ void flush_smp_call_function_from_idle(void) local_irq_save(flags); flush_smp_call_function_queue(true); @@ -22712,10 +22805,10 @@ index d0bf6da49..7a74b501a 100644 
"cpu_stop: %ps(%p) leaked preempt count\n", fn, arg); goto repeat; diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index 544ce87ba..3db616aec 100644 +index ede09dda3..9dcc62155 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c -@@ -2052,6 +2052,36 @@ SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp, +@@ -2054,6 +2054,36 @@ SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp, } #endif @@ -22753,10 +22846,10 @@ index 544ce87ba..3db616aec 100644 * Functions related to boot-time initialization: */ diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c -index aed5d6b6c..c26a7168f 100644 +index 21d367fe1..102b469dd 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c -@@ -989,7 +989,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) +@@ -1010,7 +1010,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) if (unlikely(local_softirq_pending())) { static int ratelimit; @@ -22792,10 +22885,10 @@ index c1b52dab3..101a73eea 100644 do { ret = __try_to_del_timer_sync(timer, shutdown); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c -index 6eead8a61..0cd86be3b 100644 +index c2bd3285c..2c3d4d937 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c -@@ -2607,60 +2607,43 @@ enum print_line_t trace_handle_return(struct trace_seq *s) +@@ -2617,60 +2617,43 @@ enum print_line_t trace_handle_return(struct trace_seq *s) } EXPORT_SYMBOL_GPL(trace_handle_return); @@ -22875,7 +22968,7 @@ index 6eead8a61..0cd86be3b 100644 } struct ring_buffer_event * -@@ -3863,14 +3846,17 @@ unsigned long trace_total_entries(struct trace_array *tr) +@@ -3873,14 +3856,17 @@ unsigned long trace_total_entries(struct trace_array *tr) static void print_lat_help_header(struct seq_file *m) { @@ -22901,7 +22994,7 @@ index 6eead8a61..0cd86be3b 100644 } static void print_event_info(struct array_buffer *buf, struct seq_file *m) -@@ -3904,13 +3890,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file +@@ -3914,13 +3900,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file print_event_info(buf, m); @@ -22925,7 +23018,7 @@ index 6eead8a61..0cd86be3b 100644 } void -@@ -9484,7 +9473,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) +@@ -9498,7 +9487,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) tracing_off(); local_irq_save(flags); @@ -22933,7 +23026,7 @@ index 6eead8a61..0cd86be3b 100644 /* Simulate the iterator */ trace_init_global_iter(&iter); -@@ -9564,7 +9552,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) +@@ -9578,7 +9566,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); } atomic_dec(&dump_running); @@ -22942,7 +23035,7 @@ index 6eead8a61..0cd86be3b 100644 } EXPORT_SYMBOL_GPL(ftrace_dump); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h -index 045cd3b14..b2a72a370 100644 +index 6a8a638a2..8916510b4 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -141,25 +141,6 @@ struct kretprobe_trace_entry_head { @@ -22972,7 +23065,7 @@ index 045cd3b14..b2a72a370 100644 struct trace_array; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c -index d2ca0ca09..30c82c785 100644 +index 866d58301..0fdb2b6e7 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -184,6 +184,8 @@ static int trace_define_common_fields(void) @@ -23035,10 +23128,10 @@ index 4778cecee..0c4cc486c 100644 } 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index 3c40cf18d..d0870fc0d 100644 +index 67a776faf..ca834c96e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c -@@ -4847,9 +4847,7 @@ void show_workqueue_state(void) +@@ -4850,9 +4850,7 @@ void show_workqueue_state(void) * drivers that queue work while holding locks * also taken in their write paths. */ @@ -23048,15 +23141,15 @@ index 3c40cf18d..d0870fc0d 100644 } raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); /* -@@ -4873,7 +4871,6 @@ void show_workqueue_state(void) +@@ -4882,7 +4880,6 @@ void show_workqueue_state(void) * queue work while holding locks also taken in their write * paths. */ - printk_safe_enter(); pr_info("pool %d:", pool->id); pr_cont_pool_info(pool); - pr_cont(" hung=%us workers=%d", -@@ -4888,7 +4885,6 @@ void show_workqueue_state(void) + pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers); +@@ -4895,7 +4892,6 @@ void show_workqueue_state(void) first = false; } pr_cont("\n"); @@ -23064,7 +23157,7 @@ index 3c40cf18d..d0870fc0d 100644 next_pool: raw_spin_unlock_irqrestore(&pool->lock, flags); /* -@@ -4980,6 +4976,10 @@ static void unbind_workers(int cpu) +@@ -4987,6 +4983,10 @@ static void unbind_workers(int cpu) pool->flags |= POOL_DISASSOCIATED; raw_spin_unlock_irq(&pool->lock); @@ -23127,21 +23220,20 @@ index fb22fb266..c3c76b833 100644 +} +EXPORT_SYMBOL(cpumask_any_distribute); diff --git a/lib/debugobjects.c b/lib/debugobjects.c -index 71bdc167a..e5ab016ca 100644 +index 4dd9283f6..84077ef82 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c -@@ -564,7 +564,10 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack +@@ -616,7 +616,9 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack + struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; - -- fill_pool(); +- +#ifdef CONFIG_PREEMPT_RT + if (preempt_count() == 0 && !irqs_disabled()) +#endif -+ fill_pool(); + debug_objects_fill_pool(); db = get_bucket((unsigned long) addr); - diff --git a/lib/dump_stack.c b/lib/dump_stack.c index a00ee6eed..f5a33b6f7 100644 --- a/lib/dump_stack.c @@ -23440,7 +23532,7 @@ index 78a630bbd..d27a80502 100644 if ((wait_state != TASK_RUNNING || diff --git a/mm/Kconfig b/mm/Kconfig -index 0fe459e79..46ef95511 100644 +index 70c85533a..8f4fbaeaa 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -404,7 +404,7 @@ config NOMMU_INITIAL_TRIM_EXCESS @@ -23771,7 +23863,7 @@ index efe38ab47..ad72e587c 100644 #if defined(HASHED_PAGE_VIRTUAL) diff --git a/mm/memcontrol.c b/mm/memcontrol.c -index 5082d0664..c574b293e 100644 +index c9ffc793e..f3bbd2a21 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -67,6 +67,7 @@ @@ -23797,7 +23889,7 @@ index 5082d0664..c574b293e 100644 /* Whether legacy memory+swap accounting is active */ static bool do_memsw_account(void) { -@@ -764,6 +773,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, +@@ -760,6 +769,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); memcg = pn->memcg; @@ -23805,7 +23897,7 @@ index 5082d0664..c574b293e 100644 /* Update memcg */ __this_cpu_add(memcg->vmstats_percpu->state[idx], val); -@@ -771,6 +781,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, +@@ -767,6 +777,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val); memcg_rstat_updated(memcg); @@ -23813,7 
+23905,7 @@ index 5082d0664..c574b293e 100644 } /** -@@ -2174,6 +2185,7 @@ void unlock_page_memcg(struct page *page) +@@ -2180,6 +2191,7 @@ void unlock_page_memcg(struct page *page) EXPORT_SYMBOL(unlock_page_memcg); struct memcg_stock_pcp { @@ -23821,7 +23913,7 @@ index 5082d0664..c574b293e 100644 struct mem_cgroup *cached; /* this never be root cgroup */ unsigned int nr_pages; -@@ -2225,7 +2237,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) +@@ -2231,7 +2243,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) if (nr_pages > MEMCG_CHARGE_BATCH) return ret; @@ -23830,7 +23922,7 @@ index 5082d0664..c574b293e 100644 stock = this_cpu_ptr(&memcg_stock); if (memcg == stock->cached && stock->nr_pages >= nr_pages) { -@@ -2233,7 +2245,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) +@@ -2239,7 +2251,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) ret = true; } @@ -23839,7 +23931,7 @@ index 5082d0664..c574b293e 100644 return ret; } -@@ -2268,14 +2280,14 @@ static void drain_local_stock(struct work_struct *dummy) +@@ -2274,14 +2286,14 @@ static void drain_local_stock(struct work_struct *dummy) * The only protection from memory hotplug vs. drain_stock races is * that we always operate on local CPU stock here with IRQ disabled */ @@ -23856,7 +23948,7 @@ index 5082d0664..c574b293e 100644 } /* -@@ -2287,7 +2299,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) +@@ -2293,7 +2305,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) struct memcg_stock_pcp *stock; unsigned long flags; @@ -23865,7 +23957,7 @@ index 5082d0664..c574b293e 100644 stock = this_cpu_ptr(&memcg_stock); if (stock->cached != memcg) { /* reset if necessary */ -@@ -2300,7 +2312,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) +@@ -2306,7 +2318,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) if (stock->nr_pages > MEMCG_CHARGE_BATCH) drain_stock(stock); @@ -23874,7 +23966,7 @@ index 5082d0664..c574b293e 100644 } /* -@@ -2320,7 +2332,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) +@@ -2326,7 +2338,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) * as well as workers from this path always operate on the local * per-cpu data. CPU up doesn't touch memcg_stock at all. 
*/ @@ -23883,7 +23975,7 @@ index 5082d0664..c574b293e 100644 for_each_online_cpu(cpu) { struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); struct mem_cgroup *memcg; -@@ -2343,7 +2355,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) +@@ -2349,7 +2361,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) schedule_work_on(cpu, &stock->work); } } @@ -23892,7 +23984,7 @@ index 5082d0664..c574b293e 100644 mutex_unlock(&percpu_charge_mutex); } -@@ -3146,7 +3158,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) +@@ -3160,7 +3172,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) unsigned long flags; bool ret = false; @@ -23901,7 +23993,7 @@ index 5082d0664..c574b293e 100644 stock = this_cpu_ptr(&memcg_stock); if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { -@@ -3154,7 +3166,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) +@@ -3168,7 +3180,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) ret = true; } @@ -23910,7 +24002,7 @@ index 5082d0664..c574b293e 100644 return ret; } -@@ -3210,7 +3222,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) +@@ -3224,7 +3236,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) struct memcg_stock_pcp *stock; unsigned long flags; @@ -23919,7 +24011,7 @@ index 5082d0664..c574b293e 100644 stock = this_cpu_ptr(&memcg_stock); if (stock->cached_objcg != objcg) { /* reset if necessary */ -@@ -3224,7 +3236,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) +@@ -3238,7 +3250,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) if (stock->nr_bytes > PAGE_SIZE) drain_obj_stock(stock); @@ -23928,7 +24020,7 @@ index 5082d0664..c574b293e 100644 } int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) -@@ -6827,12 +6839,12 @@ static int mem_cgroup_move_account(struct page *page, +@@ -6844,12 +6856,12 @@ static int mem_cgroup_move_account(struct page *page, ret = 0; @@ -23943,7 +24035,7 @@ index 5082d0664..c574b293e 100644 out_unlock: unlock_page(page); out: -@@ -7809,10 +7821,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) +@@ -7828,10 +7840,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) css_get(&memcg->css); commit_charge(page, memcg); @@ -23956,7 +24048,7 @@ index 5082d0664..c574b293e 100644 /* * Cgroup1's unified memory+swap counter has been charged with the -@@ -7868,11 +7880,11 @@ static void uncharge_batch(const struct uncharge_gather *ug) +@@ -7887,11 +7899,11 @@ static void uncharge_batch(const struct uncharge_gather *ug) memcg_oom_recover(ug->memcg); } @@ -23970,7 +24062,7 @@ index 5082d0664..c574b293e 100644 /* drop reference from uncharge_page */ css_put(&ug->memcg->css); -@@ -8044,10 +8056,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) +@@ -8063,10 +8075,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) css_get(&memcg->css); commit_charge(newpage, memcg); @@ -23983,7 +24075,7 @@ index 5082d0664..c574b293e 100644 } DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); -@@ -8177,9 +8189,13 @@ static int __init mem_cgroup_init(void) +@@ -8196,9 +8208,13 @@ static int __init mem_cgroup_init(void) cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, memcg_hotplug_cpu_dead); @@ -24000,7 +24092,7 @@ index 
5082d0664..c574b293e 100644 for_each_node(node) { struct mem_cgroup_tree_per_node *rtpn; -@@ -8230,6 +8246,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) +@@ -8248,6 +8264,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) struct mem_cgroup *memcg, *swap_memcg; unsigned int nr_entries; unsigned short oldid; @@ -24008,7 +24100,7 @@ index 5082d0664..c574b293e 100644 VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(page_count(page), page); -@@ -8275,9 +8292,13 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) +@@ -8293,9 +8310,13 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) * important here to have the interrupts disabled because it is the * only synchronisation we have for updating the per-CPU variables. */ @@ -24023,7 +24115,7 @@ index 5082d0664..c574b293e 100644 css_put(&memcg->css); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index 9e85e8b52..8f0d4b6c3 100644 +index d2a8ec193..66d9600a1 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -61,6 +61,7 @@ @@ -24379,7 +24471,7 @@ index 9e85e8b52..8f0d4b6c3 100644 return NULL; } -@@ -9344,7 +9414,7 @@ void zone_pcp_reset(struct zone *zone) +@@ -9410,7 +9480,7 @@ void zone_pcp_reset(struct zone *zone) struct per_cpu_pageset *pset; /* avoid races with drain_pages() */ @@ -24388,7 +24480,7 @@ index 9e85e8b52..8f0d4b6c3 100644 if (zone->pageset != &boot_pageset) { for_each_online_cpu(cpu) { pset = per_cpu_ptr(zone->pageset, cpu); -@@ -9353,7 +9423,7 @@ void zone_pcp_reset(struct zone *zone) +@@ -9419,7 +9489,7 @@ void zone_pcp_reset(struct zone *zone) free_percpu(zone->pageset); zone->pageset = &boot_pageset; } @@ -24398,7 +24490,7 @@ index 9e85e8b52..8f0d4b6c3 100644 #ifdef CONFIG_MEMORY_HOTREMOVE diff --git a/mm/shmem.c b/mm/shmem.c -index f7caf1dec..0ef372e7d 100644 +index 4ef1d0e69..be31c46af 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -307,10 +307,10 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop) @@ -25363,10 +25455,10 @@ index ec1c3a376..559fcc2a3 100644 for (i = 0; i < t.count; i++) { diff --git a/mm/vmalloc.c b/mm/vmalloc.c -index e27cd716c..1285e5cb0 100644 +index caba5659d..1df9eb63d 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c -@@ -1889,7 +1889,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) +@@ -1894,7 +1894,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) struct vmap_block *vb; struct vmap_area *va; unsigned long vb_idx; @@ -25375,7 +25467,7 @@ index e27cd716c..1285e5cb0 100644 void *vaddr; node = numa_node_id(); -@@ -1926,11 +1926,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) +@@ -1931,11 +1931,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) return ERR_PTR(err); } @@ -25390,7 +25482,7 @@ index e27cd716c..1285e5cb0 100644 return vaddr; } -@@ -1995,6 +1996,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) +@@ -2000,6 +2001,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) struct vmap_block *vb; void *vaddr = NULL; unsigned int order; @@ -25398,7 +25490,7 @@ index e27cd716c..1285e5cb0 100644 BUG_ON(offset_in_page(size)); BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); -@@ -2009,7 +2011,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) +@@ -2014,7 +2016,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) order = get_order(size); rcu_read_lock(); @@ -25408,7 +25500,7 @@ index e27cd716c..1285e5cb0 100644 list_for_each_entry_rcu(vb, &vbq->free, free_list) { unsigned long pages_off; -@@ -2032,7 +2035,7 @@ 
static void *vb_alloc(unsigned long size, gfp_t gfp_mask) +@@ -2037,7 +2040,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) break; } @@ -25794,7 +25886,7 @@ index 6079f5625..a928c7136 100644 migrate_read_unlock(zspage); unpin_tag(handle); diff --git a/net/Kconfig b/net/Kconfig -index a22c3fb88..5a17bded7 100644 +index 232075ae1..4221c79b7 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -280,7 +280,7 @@ config CGROUP_NET_CLASSID @@ -25807,7 +25899,7 @@ index a22c3fb88..5a17bded7 100644 config BQL bool diff --git a/net/core/dev.c b/net/core/dev.c -index 5a1994be7..254a4221c 100644 +index b7a4f0bac..f65f77abd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -221,14 +221,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) @@ -25827,7 +25919,7 @@ index 5a1994be7..254a4221c 100644 #endif } -@@ -3051,6 +3051,7 @@ static void __netif_reschedule(struct Qdisc *q) +@@ -3053,6 +3053,7 @@ static void __netif_reschedule(struct Qdisc *q) sd->output_queue_tailp = &q->next_sched; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); @@ -25835,7 +25927,7 @@ index 5a1994be7..254a4221c 100644 } void __netif_schedule(struct Qdisc *q) -@@ -3113,6 +3114,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) +@@ -3115,6 +3116,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) __this_cpu_write(softnet_data.completion_queue, skb); raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); @@ -25843,7 +25935,7 @@ index 5a1994be7..254a4221c 100644 } EXPORT_SYMBOL(__dev_kfree_skb_irq); -@@ -3791,7 +3793,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, +@@ -3795,7 +3797,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, * This permits qdisc->running owner to get the lock more * often and dequeue packets faster. 
*/ @@ -25855,7 +25947,7 @@ index 5a1994be7..254a4221c 100644 if (unlikely(contended)) spin_lock(&q->busylock); -@@ -4591,6 +4597,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, +@@ -4597,6 +4603,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, rps_unlock(sd); local_irq_restore(flags); @@ -25863,7 +25955,7 @@ index 5a1994be7..254a4221c 100644 atomic_long_inc(&skb->dev->rx_dropped); kfree_skb(skb); -@@ -4810,7 +4817,7 @@ static int netif_rx_internal(struct sk_buff *skb) +@@ -4816,7 +4823,7 @@ static int netif_rx_internal(struct sk_buff *skb) struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; @@ -25872,7 +25964,7 @@ index 5a1994be7..254a4221c 100644 rcu_read_lock(); cpu = get_rps_cpu(skb->dev, skb, &rflow); -@@ -4820,14 +4827,14 @@ static int netif_rx_internal(struct sk_buff *skb) +@@ -4826,14 +4833,14 @@ static int netif_rx_internal(struct sk_buff *skb) ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); @@ -25890,7 +25982,7 @@ index 5a1994be7..254a4221c 100644 } return ret; } -@@ -4866,11 +4873,9 @@ int netif_rx_ni(struct sk_buff *skb) +@@ -4872,11 +4879,9 @@ int netif_rx_ni(struct sk_buff *skb) trace_netif_rx_ni_entry(skb); @@ -25904,7 +25996,7 @@ index 5a1994be7..254a4221c 100644 trace_netif_rx_ni_exit(err); return err; -@@ -6346,12 +6351,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) +@@ -6353,12 +6358,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) sd->rps_ipi_list = NULL; local_irq_enable(); @@ -25919,7 +26011,7 @@ index 5a1994be7..254a4221c 100644 } static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) -@@ -6429,6 +6436,7 @@ void __napi_schedule(struct napi_struct *n) +@@ -6436,6 +6443,7 @@ void __napi_schedule(struct napi_struct *n) local_irq_save(flags); ____napi_schedule(this_cpu_ptr(&softnet_data), n); local_irq_restore(flags); @@ -25927,7 +26019,7 @@ index 5a1994be7..254a4221c 100644 } EXPORT_SYMBOL(__napi_schedule); -@@ -10987,6 +10995,7 @@ static int dev_cpu_dead(unsigned int oldcpu) +@@ -10994,6 +11002,7 @@ static int dev_cpu_dead(unsigned int oldcpu) raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); @@ -25935,7 +26027,7 @@ index 5a1994be7..254a4221c 100644 #ifdef CONFIG_RPS remsd = oldsd->rps_ipi_list; -@@ -11000,7 +11009,7 @@ static int dev_cpu_dead(unsigned int oldcpu) +@@ -11007,7 +11016,7 @@ static int dev_cpu_dead(unsigned int oldcpu) netif_rx_ni(skb); input_queue_head_incr(oldsd); } @@ -25944,7 +26036,7 @@ index 5a1994be7..254a4221c 100644 netif_rx_ni(skb); input_queue_head_incr(oldsd); } -@@ -11316,7 +11325,7 @@ static int __init net_dev_init(void) +@@ -11323,7 +11332,7 @@ static int __init net_dev_init(void) INIT_WORK(flush, flush_backlog); @@ -26035,10 +26127,10 @@ index e491b083b..ef432cea2 100644 struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b) diff --git a/net/core/sock.c b/net/core/sock.c -index d8d42ff15..d23b79afa 100644 +index 9741b4db4..a4195e738 100644 --- a/net/core/sock.c +++ b/net/core/sock.c -@@ -3068,12 +3068,11 @@ void lock_sock_nested(struct sock *sk, int subclass) +@@ -3079,12 +3079,11 @@ void lock_sock_nested(struct sock *sk, int subclass) if (sk->sk_lock.owned) __lock_sock(sk); sk->sk_lock.owned = 1; @@ -26052,7 +26144,7 @@ index d8d42ff15..d23b79afa 100644 } EXPORT_SYMBOL(lock_sock_nested); -@@ -3122,12 +3121,11 @@ bool lock_sock_fast(struct sock *sk) +@@ -3133,12 +3132,11 @@ bool lock_sock_fast(struct sock *sk) __lock_sock(sk); sk->sk_lock.owned = 1; @@ -26067,10 +26159,10 @@ index 
d8d42ff15..d23b79afa 100644 } EXPORT_SYMBOL(lock_sock_fast); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c -index b8eecd670..070473934 100644 +index 8c23a68fa..53fd742c8 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c -@@ -1271,7 +1271,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev, +@@ -1276,7 +1276,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev, rcu_assign_pointer(sch->stab, stab); } if (tca[TCA_RATE]) { @@ -26134,10 +26226,10 @@ index f56b4df6c..6cb833516 100644 } EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c -index ac2f1a733..84f421e6b 100644 +index 73f5cbae6..a976e2389 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c -@@ -2673,7 +2673,8 @@ int __net_init xfrm_state_init(struct net *net) +@@ -2670,7 +2670,8 @@ int __net_init xfrm_state_init(struct net *net) net->xfrm.state_num = 0; INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize); spin_lock_init(&net->xfrm.xfrm_state_lock); diff --git a/0001-modify-openeuler_defconfig-for-rt62.patch b/0001-modify-openeuler_defconfig-for-rt62.patch index 2156deb..4774174 100644 --- a/0001-modify-openeuler_defconfig-for-rt62.patch +++ b/0001-modify-openeuler_defconfig-for-rt62.patch @@ -1,52 +1,53 @@ -From 0026e130f88770f45b00f52ba1374dd90b8be0f0 Mon Sep 17 00:00:00 2001 -From: liyulei -Date: Mon, 6 Feb 2023 18:04:41 +0800 -Subject: [PATCH 2/2] modify openeuler_defconfig for rt62 +From 744a4eb5a8241919999033f304b48cf801918fd4 Mon Sep 17 00:00:00 2001 +From: zhangyu +Date: Thu, 14 Dec 2023 11:38:19 +0800 +Subject: [PATCH] modify-openeuler_defconfig-for-rt62.patch --- - arch/arm64/configs/openeuler_defconfig | 5 +++-- + arch/arm64/configs/openeuler_defconfig | 6 +++--- arch/arm64/kernel/fpsimd.c | 4 ++-- - arch/x86/configs/openeuler_defconfig | 7 ++++--- + arch/x86/configs/openeuler_defconfig | 8 ++++---- arch/x86/include/asm/preempt.h | 16 +++++++++++++--- include/linux/printk.h | 2 +- kernel/printk/printk.c | 2 +- - 6 files changed, 24 insertions(+), 12 deletions(-) + 6 files changed, 24 insertions(+), 14 deletions(-) diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig -index 30384eacad4f..b0ed4a9b3058 100644 +index 12e4828c9..7edb9923f 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig -@@ -74,6 +74,7 @@ CONFIG_HIGH_RES_TIMERS=y +@@ -87,7 +87,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set +- +CONFIG_PREEMPT_RT=y - # # CPU/Task time and stats accounting -@@ -733,7 +734,7 @@ CONFIG_ACPI_MPAM=y + # +@@ -760,7 +760,7 @@ CONFIG_ACPI_MPAM=y CONFIG_ACPI_PPTT=y # CONFIG_PMIC_OPREGION is not set CONFIG_IRQ_BYPASS_MANAGER=y -CONFIG_VIRTUALIZATION=y -+# CONFIG_VIRTUALIZATION is not set ++#CONFIG_VIRTUALIZATION=y CONFIG_KVM=y CONFIG_HAVE_KVM_IRQCHIP=y CONFIG_HAVE_KVM_IRQFD=y -@@ -1125,7 +1126,7 @@ CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +@@ -1156,7 +1156,7 @@ CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y # CONFIG_GUP_BENCHMARK is not set # CONFIG_READ_ONLY_THP_FOR_FS is not set CONFIG_ARCH_HAS_PTE_SPECIAL=y -CONFIG_PIN_MEMORY=y -+# CONFIG_PIN_MEMORY is not set ++#CONFIG_PIN_MEMORY is not set CONFIG_PID_RESERVE=y CONFIG_MEMORY_RELIABLE=y # CONFIG_CLEAR_FREELIST_PAGE is not set diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c -index 84520f11667d..aa631771e0dc 100644 +index 131b2dda3..992c11f2c 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c -@@ -180,7 +180,7 @@ 
+@@ -234,7 +234,7 @@ static void __get_cpu_fpsimd_context(void)
  */
 static void get_cpu_fpsimd_context(void)
 {
-	preempt_disable();
+	local_bh_disable();
 	__get_cpu_fpsimd_context();
 }
 
-@@ -201,7 +201,7 @@ static void __put_cpu_fpsimd_context(void)
+@@ -255,7 +255,7 @@ static void __put_cpu_fpsimd_context(void)
 static void put_cpu_fpsimd_context(void)
 {
 	__put_cpu_fpsimd_context();
-	preempt_enable();
+	local_bh_enable();
 }
 
 static bool have_cpu_fpsimd_context(void)
 diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
-index 5ada612f1d75..685a49ca1fef 100644
+index 3db754e79..14cf52af1 100644
 --- a/arch/x86/configs/openeuler_defconfig
 +++ b/arch/x86/configs/openeuler_defconfig
-@@ -89,9 +89,10 @@ CONFIG_NO_HZ=y
- CONFIG_HIGH_RES_TIMERS=y
+@@ -89,10 +89,10 @@ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
  # end of Timers subsystem
 
 -CONFIG_PREEMPT_NONE=y
 +# CONFIG_PREEMPT_NONE is not set
  # CONFIG_PREEMPT_VOLUNTARY is not set
  # CONFIG_PREEMPT is not set
+-
 +CONFIG_PREEMPT_RT=y
 
  #
  # CPU/Task time and stats accounting
-@@ -206,7 +207,7 @@ CONFIG_HAVE_UID16=y
+@@ -212,7 +212,7 @@ CONFIG_HAVE_UID16=y
  CONFIG_SYSCTL_EXCEPTION_TRACE=y
  CONFIG_HAVE_PCSPKR_PLATFORM=y
  CONFIG_BPF=y
  CONFIG_UID16=y
  CONFIG_MULTIUSER=y
  CONFIG_SGETMASK_SYSCALL=y
-@@ -735,7 +736,7 @@ CONFIG_KVM_COMPAT=y
+@@ -748,7 +748,7 @@ CONFIG_KVM_COMPAT=y
  CONFIG_HAVE_KVM_IRQ_BYPASS=y
  CONFIG_HAVE_KVM_NO_POLL=y
  CONFIG_KVM_XFER_TO_GUEST_WORK=y
 -CONFIG_KVM=m
 +# CONFIG_KVM is not set
  CONFIG_KVM_INTEL=m
  CONFIG_X86_SGX_KVM=y
 diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
-index a3b73de276c3..dda9ec24fdc1 100644
+index a3b73de27..dda9ec24f 100644
 --- a/arch/x86/include/asm/preempt.h
 +++ b/arch/x86/include/asm/preempt.h
 @@ -134,10 +134,8 @@ static __always_inline bool should_resched(int preempt_offset)
 +
  #endif /* __ASM_PREEMPT_H */
 diff --git a/include/linux/printk.h b/include/linux/printk.h
-index 9331b131ba25..23946f4828b2 100644
+index 26c70fd11..c82c971e8 100644
 --- a/include/linux/printk.h
 +++ b/include/linux/printk.h
 @@ -241,7 +241,7 @@ __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
 -#if defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI)
 +#if (defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI)) && !defined(CONFIG_PREEMPT_RT)
  void zap_locks(void);
  #else
  static inline void zap_locks(void) { }
 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
-index e95b00f24c75..59bd804e662a 100644
+index 45239dcfb..0f9b88d38 100644
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -1813,7 +1813,7 @@ static void print_sync_until(struct console *con, u64 seq)
+@@ -1826,7 +1826,7 @@ static void print_sync_until(struct console *con, u64 seq)
  	console_atomic_unlock(flags);
  }
 
 -#if defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI)
 +#if (defined(CONFIG_X86) || defined(CONFIG_ARM64_PSEUDO_NMI)) && !defined(CONFIG_PREEMPT_RT)
  void zap_locks(void)
  {
 //	if (raw_spin_is_locked(&logbuf_lock)) {
 -- 
-2.36.1
+2.33.0
diff --git a/kernel-rt.spec b/kernel-rt.spec
index 4fc925e..59c89e3 100644
--- a/kernel-rt.spec
+++ b/kernel-rt.spec
@@ -10,9 +10,9 @@
 %global upstream_version 5.10
 %global upstream_sublevel 0
-%global devel_release 161
+%global devel_release 177
 %global maintenance_release .0.0
-%global pkg_release .60
+%global pkg_release .61
 %global rt_release .rt62
 
 %define with_debuginfo 1
@@ -891,6 +891,9 @@ fi
 %endif
 
 %changelog
+* Thu Dec 14 2023 zhangyu - 5.10.0-177.0.0.61
+- update kernel-rt version to 5.10.0-177.0.0
+
 * Mon Nov 1 2023 zhangyu - 5.10.0-161.0.0.60
 - update kernel-rt version to 5.10.0-161.0.0
diff --git a/raspberrypi-kernel-rt.spec b/raspberrypi-kernel-rt.spec
index a01ddba..e6344c7 100644
--- a/raspberrypi-kernel-rt.spec
+++ b/raspberrypi-kernel-rt.spec
@@ -2,13 +2,13 @@
 
 %global KernelVer %{version}-%{release}.raspi.%{_target_cpu}
 
-%global hulkrelease 161.0.0
+%global hulkrelease 177.0.0
 %global debug_package %{nil}
 
 Name: raspberrypi-kernel-rt
 Version: 5.10.0
-Release: %{hulkrelease}.rt62.10
+Release: %{hulkrelease}.rt62.11
 Summary: Linux Kernel
 License: GPLv2
 URL: http://www.kernel.org/
@@ -172,6 +172,9 @@ install -m 644 /boot/dtb-%{KernelVer}/overlays/README /boot/overlays/
 /lib/modules/%{KernelVer}
 
 %changelog
+* Tue Dec 12 2023 zhangyu - 5.10.0-177.0.0.11
+- - update preempt-RT to openEuler 5.10.0-177.0.0
+
 * Mon Dec 30 2023 zhangyu - 5.10.0-161.0.0.10
 - - update preempt-RT to openEuler 5.10.0-161.0.0