sh: initial stack protector support.
author    Filippo Arcidiacono <filippo.arcidiacono@st.com>
          Thu, 19 Apr 2012 06:45:57 +0000 (15:45 +0900)
committer Paul Mundt <lethal@linux-sh.org>
          Thu, 19 Apr 2012 06:45:57 +0000 (15:45 +0900)
This implements basic -fstack-protector support, based on the early ARM
version in c743f38013aeff58ef6252601e397b5ba281c633. The SMP case is
limited to the initial canary value, while the UP case handles per-task
granularity (limited to 32-bit sh until a new enough sh64 compiler
manifests itself).

Signed-off-by: Filippo Arcidiacono <filippo.arcidiacono@st.com>
Reviewed-by: Carmelo Amoroso <carmelo.amoroso@st.com>
Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
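
For context (not part of the patch): -fstack-protector works by having GCC rewrite function prologues and epilogues, so the kernel only has to supply the __stack_chk_guard variable and the __stack_chk_fail() handler. The hand-written C below is a rough approximation of what an instrumented function ends up doing; the function and buffer names are made up for illustration.

#include <linux/string.h>

/*
 * Hand-written approximation of a function compiled with
 * -fstack-protector (illustration only; the real prologue/epilogue
 * is emitted by GCC, not written in C).
 */
extern unsigned long __stack_chk_guard;   /* provided by arch code        */
extern void __stack_chk_fail(void);       /* provided by kernel/panic.c   */

void copy_name(const char *src)
{
        unsigned long canary = __stack_chk_guard;  /* prologue: stash guard */
        char buf[32];                              /* sits below the canary */

        strcpy(buf, src);       /* an overflow here clobbers the canary
                                   before it can reach the return address */

        if (canary != __stack_chk_guard)           /* epilogue: verify      */
                __stack_chk_fail();                /* does not return       */
}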
arch/sh/Kconfig
arch/sh/Makefile
arch/sh/include/asm/stackprotector.h [new file with mode: 0644]
arch/sh/kernel/process.c
arch/sh/kernel/process_32.c

diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ff9e033ce626774fe5b0ad7ddf1c199fe7789471..60ed3669979d6eb388b689aa92009249be96fe6b 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -685,6 +685,20 @@ config SECCOMP
 
          If unsure, say N.
 
+config CC_STACKPROTECTOR
+       bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
+       depends on SUPERH32 && EXPERIMENTAL
+       help
+         This option turns on the -fstack-protector GCC feature. This
+         feature puts, at the beginning of functions, a canary value on
+         the stack just before the return address, and validates
+         the value just before actually returning.  Stack based buffer
+         overflows (that need to overwrite this return address) now also
+         overwrite the canary, which gets detected and the attack is then
+         neutralized via a kernel panic.
+
+         This feature requires gcc version 4.2 or above.
+
 config SMP
        bool "Symmetric multi-processing support"
        depends on SYS_SUPPORTS_SMP
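
The help text's "neutralized via a kernel panic" refers to the generic handler that compiler-generated code calls when the check fails. That handler is not added by this patch; it already lives in kernel/panic.c and looks roughly like this:

#include <linux/kernel.h>
#include <linux/export.h>

/*
 * Roughly the existing generic handler: called by GCC-emitted code
 * when the on-stack canary no longer matches __stack_chk_guard,
 * i.e. the stack frame has been overwritten.
 */
void __stack_chk_fail(void)
{
        panic("stack-protector: Kernel stack is corrupted in: %p\n",
              __builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);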
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 3fc0f413777cc152ee540fb1807ed282d065c384..24875c8c151490efc35fbda1ac8c5f87e1e66f9f 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -199,6 +199,10 @@ ifeq ($(CONFIG_DWARF_UNWINDER),y)
   KBUILD_CFLAGS += -fasynchronous-unwind-tables
 endif
 
+ifeq ($(CONFIG_CC_STACKPROTECTOR),y)
+  KBUILD_CFLAGS += -fstack-protector
+endif
+
 libs-$(CONFIG_SUPERH32)                := arch/sh/lib/ $(libs-y)
 libs-$(CONFIG_SUPERH64)                := arch/sh/lib64/ $(libs-y)
 
diff --git a/arch/sh/include/asm/stackprotector.h b/arch/sh/include/asm/stackprotector.h
new file mode 100644
index 0000000..d9df3a7
--- /dev/null
+++ b/arch/sh/include/asm/stackprotector.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_SH_STACKPROTECTOR_H
+#define __ASM_SH_STACKPROTECTOR_H
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+       unsigned long canary;
+
+       /* Try to get a semi random initial value. */
+       get_random_bytes(&canary, sizeof(canary));
+       canary ^= LINUX_VERSION_CODE;
+
+       current->stack_canary = canary;
+       __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* __ASM_SH_STACKPROTECTOR_H */
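
The NOTE above ("must only be called from functions that never return") is satisfied by the generic boot code rather than by anything in this patch: start_kernel() pulls in <linux/stackprotector.h> and seeds the canary early, roughly as sketched here (call site simplified, details elided):

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/stackprotector.h>

asmlinkage void __init start_kernel(void)
{
        /* ... early boot setup elided ... */

        /*
         * Set up the initial canary ASAP: start_kernel() never
         * returns, so the value seeded here stays valid for the
         * whole lifetime of the init task.
         */
        boot_init_stack_canary();

        /* ... remainder of boot; control never returns here ... */
}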
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 325f98b1736d4a1ab8fdc93f54bcd7cb3674d298..f3f03e4c785d4216da494fad8a026c7cf2f32820 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -2,10 +2,17 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/export.h>
+#include <linux/stackprotector.h>
 
 struct kmem_cache *task_xstate_cachep = NULL;
 unsigned int xstate_size;
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
        *dst = *src;
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 94273aaf78c1605da62d82ea61dc14a746ec5dff..f78cc421e6659787585e54cb3fc9295ab32c3020 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -22,6 +22,7 @@
 #include <linux/ftrace.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/prefetch.h>
+#include <linux/stackprotector.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/fpu.h>
@@ -220,6 +221,10 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 {
        struct thread_struct *next_t = &next->thread;
 
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+       __stack_chk_guard = next->stack_canary;
+#endif
+
        unlazy_fpu(prev, task_pt_regs(prev));
 
        /* we're going to use this soon, after a few expensive things */
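
A note on the UP-only guard reload above: there is a single global __stack_chk_guard shared by every CPU, so updating it at each context switch would race between CPUs; that is why the SMP case stays with the boot-time canary, as the commit message says. The per-task values published here come from generic task creation (kernel/fork.c), which already seeds tsk->stack_canary for each new task, roughly as in this sketch (the helper name is made up for illustration):

#include <linux/random.h>
#include <linux/sched.h>

/*
 * Sketch only: hypothetical helper mirroring what dup_task_struct()
 * in kernel/fork.c already does -- give each new task its own
 * canary, which the UP __switch_to() hunk above then publishes via
 * __stack_chk_guard whenever that task is scheduled in.
 */
static inline void seed_task_canary(struct task_struct *tsk)
{
#ifdef CONFIG_CC_STACKPROTECTOR
        tsk->stack_canary = get_random_int();
#endif
}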