diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index d4bf547562cda29dd6ad66c6635b0a268b3b8468..8f77da2fa0e9d9f00d55fc435ac48dbc8b01f502 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -865,6 +865,16 @@ config ARCH_WANT_HUGE_PMD_SHARE config ARCH_HAS_CACHE_LINE_SIZE def_bool y +config ARCH_LLC_128_WORKAROUND + bool "Workaround for 128 bytes LLC cacheline" + depends on ARM64 + default n + help + The LLC cacheline size may be up to 128 bytes; this option + is useful when you want to apply workarounds for such a + configuration, e.g. aligning memory addresses to get + better cache utilization. + config SECCOMP bool "Enable seccomp to safely compute untrusted bytecode" ---help--- diff --git a/configs/config-4.19.y-aarch64 b/configs/config-4.19.y-aarch64 index 2874a84e4ad8510447b332637be6b88e2d17ae92..a94394a4743d89b145552224867d385ee965baca 100644 --- a/configs/config-4.19.y-aarch64 +++ b/configs/config-4.19.y-aarch64 @@ -435,6 +435,7 @@ CONFIG_HW_PERF_EVENTS=y CONFIG_SYS_SUPPORTS_HUGETLBFS=y CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_LLC_128_WORKAROUND=y CONFIG_SECCOMP=y CONFIG_PARAVIRT=y CONFIG_PARAVIRT_TIME_ACCOUNTING=y diff --git a/configs/config-4.19.y-aarch64-debug b/configs/config-4.19.y-aarch64-debug index cf12cc91628ab686e249b9535860cb962d86babc..3b4b3639dccb45f0eb3b20853a2c9ae56689ccc3 100644 --- a/configs/config-4.19.y-aarch64-debug +++ b/configs/config-4.19.y-aarch64-debug @@ -436,6 +436,7 @@ CONFIG_HW_PERF_EVENTS=y CONFIG_SYS_SUPPORTS_HUGETLBFS=y CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_LLC_128_WORKAROUND=y CONFIG_SECCOMP=y CONFIG_PARAVIRT=y CONFIG_PARAVIRT_TIME_ACCOUNTING=y diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 81ee5b83c92007702b59e374aa6296e8190f5dc7..4b80c7c025f85b2d4bc59c95c39bfdf39fe9699e 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -48,9 +48,24 @@ enum timekeeping_adv_mode { * cache line. 
*/ static struct { +#ifdef CONFIG_ARCH_LLC_128_WORKAROUND + /* Start seq on the middle of 128 bytes aligned address to + * keep some members of tk_core in the same 64 bytes for + * principle of locality while pushing others to another LLC + * cacheline to avoid false sharing. + */ + u8 padding1[64]; + seqcount_t seq; + /* Push some timekeeper members to another LLC cacheline */ + u8 padding2[16]; + struct timekeeper timekeeper; + /* For 128 bytes LLC cacheline */ +} tk_core __aligned(128) = { +#else seqcount_t seq; struct timekeeper timekeeper; } tk_core ____cacheline_aligned = { +#endif .seq = SEQCNT_ZERO(tk_core.seq), };