csky/ftrace: Fixup ftrace_modify_code deadlock without CPU_HAS_ICACHE_INS
author     Guo Ren <guoren@linux.alibaba.com>
           Tue, 31 Mar 2020 14:15:42 +0000 (22:15 +0800)
committer  Guo Ren <guoren@linux.alibaba.com>
           Tue, 31 Mar 2020 14:15:42 +0000 (22:15 +0800)
If CPU_HAS_ICACHE_INS is not supported, we use an IPI to sync the
icache on each core. But ftrace_modify_code() is called from
stop_machine() by the default implementation of
arch_ftrace_update_code(), and the stop_machine() callback runs with
IRQs disabled. Issuing an IPI with IRQs disabled deadlocks.

We can't flush the icache by IPI with IRQs disabled, but the startup
make_nop pass is a special case: it runs before SMP is up, so it
doesn't need to IPI the other cores.
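
For context, the generic path that triggers the deadlock looks roughly
like this (a simplified sketch of kernel/trace/ftrace.c from that era,
not the exact source):

    /* kernel/trace/ftrace.c, simplified */
    static int __ftrace_modify_code(void *data)
    {
            int *command = data;

            /* stop_machine() callback: runs with IRQs disabled */
            ftrace_modify_all_code(*command);
            return 0;
    }

    void __weak arch_ftrace_update_code(int command)
    {
            /* NULL cpumask: the callback runs on a single CPU */
            stop_machine(__ftrace_modify_code, &command, NULL);
    }

On csky without ICACHE_INS, ftrace_modify_all_code() ends up in
icache_inv_range(), which used on_each_cpu() to IPI every core; since
the other CPUs are also spinning in stop_machine() with IRQs disabled,
the IPIs are never serviced and every core hangs.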

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
arch/csky/kernel/ftrace.c
arch/csky/mm/cachev2.c

diff --git a/arch/csky/kernel/ftrace.c b/arch/csky/kernel/ftrace.c
index b4502cd2eabeb90b14e23c99cbb503cf7f9c5019..44628e3f7fa689cf6ec9856dfb79bc4221da5a60 100644
--- a/arch/csky/kernel/ftrace.c
+++ b/arch/csky/kernel/ftrace.c
@@ -3,6 +3,7 @@
 
 #include <linux/ftrace.h>
 #include <linux/uaccess.h>
+#include <linux/stop_machine.h>
 #include <asm/cacheflush.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -201,5 +202,35 @@ int ftrace_disable_ftrace_graph_caller(void)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
+#ifndef CONFIG_CPU_HAS_ICACHE_INS
+struct ftrace_modify_param {
+       int command;
+       atomic_t cpu_count;
+};
+
+static int __ftrace_modify_code(void *data)
+{
+       struct ftrace_modify_param *param = data;
+
+       if (atomic_inc_return(&param->cpu_count) == 1) {
+               ftrace_modify_all_code(param->command);
+               atomic_inc(&param->cpu_count);
+       } else {
+               while (atomic_read(&param->cpu_count) <= num_online_cpus())
+                       cpu_relax();
+               local_icache_inv_all(NULL);
+       }
+
+       return 0;
+}
+
+void arch_ftrace_update_code(int command)
+{
+       struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
+
+       stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
+}
+#endif
+
 /* _mcount is defined in abi's mcount.S */
 EXPORT_SYMBOL(_mcount);
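
How the new rendezvous plays out, illustrated for a 4-CPU system (an
annotated trace, not kernel output):

    /*
     * stop_machine(__ftrace_modify_code, &param, cpu_online_mask)
     * runs the callback on every online CPU:
     *
     *   first CPU in:  atomic_inc_return() == 1 -> patches the code
     *                  via ftrace_modify_all_code(), then
     *                  atomic_inc()                      (count: 5)
     *   other 3 CPUs:  atomic_inc_return() == 2..4 -> spin while
     *                  count <= num_online_cpus() (4), then run
     *                  local_icache_inv_all(NULL) on their own core
     *
     * Each core invalidates its own icache locally, so no IPI is ever
     * sent from IRQ-disabled context.
     */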
diff --git a/arch/csky/mm/cachev2.c b/arch/csky/mm/cachev2.c
index bc419f8039d3144ddaa14d7e9a1a6352869c9970..7a9664adce4360b36a87a13b637d787fcec97e38 100644
--- a/arch/csky/mm/cachev2.c
+++ b/arch/csky/mm/cachev2.c
@@ -7,8 +7,12 @@
 #include <asm/cache.h>
 #include <asm/barrier.h>
 
+/* for L1-cache */
 #define INS_CACHE              (1 << 0)
+#define DATA_CACHE             (1 << 1)
 #define CACHE_INV              (1 << 4)
+#define CACHE_CLR              (1 << 5)
+#define CACHE_OMS              (1 << 6)
 
 void local_icache_inv_all(void *priv)
 {
@@ -16,11 +20,6 @@ void local_icache_inv_all(void *priv)
        sync_is();
 }
 
-void icache_inv_all(void)
-{
-       on_each_cpu(local_icache_inv_all, NULL, 1);
-}
-
 #ifdef CONFIG_CPU_HAS_ICACHE_INS
 void icache_inv_range(unsigned long start, unsigned long end)
 {
@@ -31,9 +30,43 @@ void icache_inv_range(unsigned long start, unsigned long end)
        sync_is();
 }
 #else
+struct cache_range {
+       unsigned long start;
+       unsigned long end;
+};
+
+static DEFINE_SPINLOCK(cache_lock);
+
+static inline void cache_op_line(unsigned long i, unsigned int val)
+{
+       mtcr("cr22", i);
+       mtcr("cr17", val);
+}
+
+void local_icache_inv_range(void *priv)
+{
+       struct cache_range *param = priv;
+       unsigned long i = param->start & ~(L1_CACHE_BYTES - 1);
+       unsigned long flags;
+
+       spin_lock_irqsave(&cache_lock, flags);
+
+       for (; i < param->end; i += L1_CACHE_BYTES)
+               cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS);
+
+       spin_unlock_irqrestore(&cache_lock, flags);
+
+       sync_is();
+}
+
 void icache_inv_range(unsigned long start, unsigned long end)
 {
-       icache_inv_all();
+       struct cache_range param = { start, end };
+
+       if (irqs_disabled())
+               local_icache_inv_range(&param);
+       else
+               on_each_cpu(local_icache_inv_range, &param, 1);
 }
 #endif
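
And a sketch of the two paths icache_inv_range() can now take (the
early-boot call chain below is approximate and for illustration only):

    /*
     * Early boot, one CPU online, IRQs off (the startup make_nop pass):
     *
     *   ftrace_init()
     *     -> ftrace_process_locs()       patch every _mcount site
     *       -> ... -> ftrace_make_nop()
     *         -> icache_inv_range()      irqs_disabled() is true, so
     *            -> local_icache_inv_range()  only the local icache
     *                                    is invalidated, no IPI needed
     *
     * At runtime with IRQs enabled, icache_inv_range() instead uses
     * on_each_cpu(local_icache_inv_range, &param, 1) so every core
     * invalidates the affected lines.
     */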