From 714b08517988a74d988803966e6ae746953d65e3 Mon Sep 17 00:00:00 2001
From: Eugeniy Paltsev
Date: Wed, 16 Jan 2019 14:29:50 +0300
Subject: [PATCH] ARCv2: Enable unaligned access in early ASM code

BugLink: https://bugs.launchpad.net/bugs/1837813

commit 252f6e8eae909bc075a1b1e3b9efb095ae4c0b56 upstream.

It is currently done in arc_init_IRQ() which might be too late
considering gcc 7.3.1 onwards (GNU 2018.03) generates unaligned memory
accesses by default

Cc: stable@vger.kernel.org #4.4+
Signed-off-by: Eugeniy Paltsev
Signed-off-by: Vineet Gupta
[vgupta: rewrote changelog]
Signed-off-by: Greg Kroah-Hartman
Signed-off-by: Kamal Mostafa
Signed-off-by: Khalid Elmously
---
 arch/arc/kernel/head.S | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 8b90d25a15cc..26e33a8b2d18 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -17,6 +17,7 @@
 #include <asm/entry.h>
 #include <asm/arcregs.h>
 #include <asm/cache.h>
+#include <asm/irqflags.h>
 
 .macro CPU_EARLY_SETUP
 
@@ -47,6 +48,15 @@
 	sr	r5, [ARC_REG_DC_CTRL]
 
 1:
+
+#ifdef CONFIG_ISA_ARCV2
+	; Unaligned access is disabled at reset, so re-enable early as
+	; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
+	; by default
+	lr	r5, [status32]
+	bset	r5, r5, STATUS_AD_BIT
+	kflag	r5
+#endif
 .endm
 
 	.section .init.text, "ax",@progbits
-- 
2.39.2
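
Note on the motivation (illustrative, not part of the patch): the changelog
refers to gcc 7.3.1 (ARC GNU 2018.03) onwards emitting unaligned memory
accesses by default. A minimal C sketch of the kind of code affected,
assuming a packed structure whose member is not naturally aligned; the
struct and function names below are made up for illustration:

	#include <stdint.h>

	struct __attribute__((packed)) hdr {
		uint8_t  type;
		uint32_t len;	/* offset 1: not 4-byte aligned */
	};

	/* With unaligned access enabled in the compiler (the default for
	 * the toolchain mentioned above), gcc may read 'len' with a single
	 * 32-bit load. On ARCv2 that load traps unless STATUS32.AD is set,
	 * which is why the patch sets the bit in CPU_EARLY_SETUP instead of
	 * waiting for arc_init_IRQ(). */
	uint32_t get_len(const struct hdr *h)
	{
		return h->len;
	}

If code like this runs on a boot path reachable before arc_init_IRQ(),
the access faults while alignment checking is still enabled at reset;
setting AD in head.S closes that window.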