git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
x86/mcelog: Get rid of RCU remnants
author: Borislav Petkov <bp@suse.de>
Wed, 1 Nov 2017 16:47:54 +0000 (17:47 +0100)
committer: Thomas Gleixner <tglx@linutronix.de>
Wed, 1 Nov 2017 20:24:36 +0000 (21:24 +0100)
Jeremy reported a suspicious RCU usage warning in mcelog.

/dev/mcelog is called in process context now as part of the notifier
chain and doesn't need any of the fancy RCU and lockless accesses which
it did in atomic context.

Axe it all in favor of a simple mutex synchronization which cures the
problem reported.

Fixes: 5de97c9f6d85 ("x86/mce: Factor out and deprecate the /dev/mcelog driver")
Reported-by: Jeremy Cline <jcline@redhat.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-and-tested-by: Tony Luck <tony.luck@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: linux-edac@vger.kernel.org
Cc: Laura Abbott <labbott@redhat.com>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20171101164754.xzzmskl4ngrqc5br@pd.tnic
Link: https://bugzilla.redhat.com/show_bug.cgi?id=1498969
arch/x86/kernel/cpu/mcheck/dev-mcelog.c

index 10cec43aac389790e591f76be31de5cda0780c1c..7f85b76f43bcc622aa537e3bc40a35860f316479 100644 (file)
@@ -24,14 +24,6 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
 static char mce_helper[128];
 static char *mce_helper_argv[2] = { mce_helper, NULL };
 
-#define mce_log_get_idx_check(p) \
-({ \
-       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
-                        !lockdep_is_held(&mce_chrdev_read_mutex), \
-                        "suspicious mce_log_get_idx_check() usage"); \
-       smp_load_acquire(&(p)); \
-})
-
 /*
  * Lockless MCE logging infrastructure.
  * This avoids deadlocks on printk locks without having to break locks. Also
@@ -53,43 +45,32 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
                                void *data)
 {
        struct mce *mce = (struct mce *)data;
-       unsigned int next, entry;
-
-       wmb();
-       for (;;) {
-               entry = mce_log_get_idx_check(mcelog.next);
-               for (;;) {
-
-                       /*
-                        * When the buffer fills up discard new entries.
-                        * Assume that the earlier errors are the more
-                        * interesting ones:
-                        */
-                       if (entry >= MCE_LOG_LEN) {
-                               set_bit(MCE_OVERFLOW,
-                                       (unsigned long *)&mcelog.flags);
-                               return NOTIFY_OK;
-                       }
-                       /* Old left over entry. Skip: */
-                       if (mcelog.entry[entry].finished) {
-                               entry++;
-                               continue;
-                       }
-                       break;
-               }
-               smp_rmb();
-               next = entry + 1;
-               if (cmpxchg(&mcelog.next, entry, next) == entry)
-                       break;
+       unsigned int entry;
+
+       mutex_lock(&mce_chrdev_read_mutex);
+
+       entry = mcelog.next;
+
+       /*
+        * When the buffer fills up discard new entries. Assume that the
+        * earlier errors are the more interesting ones:
+        */
+       if (entry >= MCE_LOG_LEN) {
+               set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
+               goto unlock;
        }
+
+       mcelog.next = entry + 1;
+
        memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
-       wmb();
        mcelog.entry[entry].finished = 1;
-       wmb();
 
        /* wake processes polling /dev/mcelog */
        wake_up_interruptible(&mce_chrdev_wait);
 
+unlock:
+       mutex_unlock(&mce_chrdev_read_mutex);
+
        return NOTIFY_OK;
 }
 
@@ -177,13 +158,6 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static void collect_tscs(void *data)
-{
-       unsigned long *cpu_tsc = (unsigned long *)data;
-
-       cpu_tsc[smp_processor_id()] = rdtsc();
-}
-
 static int mce_apei_read_done;
 
 /* Collect MCE record of previous boot in persistent storage via APEI ERST. */
@@ -231,14 +205,9 @@ static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
                                size_t usize, loff_t *off)
 {
        char __user *buf = ubuf;
-       unsigned long *cpu_tsc;
-       unsigned prev, next;
+       unsigned next;
        int i, err;
 
-       cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
-       if (!cpu_tsc)
-               return -ENOMEM;
-
        mutex_lock(&mce_chrdev_read_mutex);
 
        if (!mce_apei_read_done) {
@@ -247,65 +216,29 @@ static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
                        goto out;
        }
 
-       next = mce_log_get_idx_check(mcelog.next);
-
        /* Only supports full reads right now */
        err = -EINVAL;
        if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
                goto out;
 
+       next = mcelog.next;
        err = 0;
-       prev = 0;
-       do {
-               for (i = prev; i < next; i++) {
-                       unsigned long start = jiffies;
-                       struct mce *m = &mcelog.entry[i];
-
-                       while (!m->finished) {
-                               if (time_after_eq(jiffies, start + 2)) {
-                                       memset(m, 0, sizeof(*m));
-                                       goto timeout;
-                               }
-                               cpu_relax();
-                       }
-                       smp_rmb();
-                       err |= copy_to_user(buf, m, sizeof(*m));
-                       buf += sizeof(*m);
-timeout:
-                       ;
-               }
-
-               memset(mcelog.entry + prev, 0,
-                      (next - prev) * sizeof(struct mce));
-               prev = next;
-               next = cmpxchg(&mcelog.next, prev, 0);
-       } while (next != prev);
-
-       synchronize_sched();
 
-       /*
-        * Collect entries that were still getting written before the
-        * synchronize.
-        */
-       on_each_cpu(collect_tscs, cpu_tsc, 1);
-
-       for (i = next; i < MCE_LOG_LEN; i++) {
+       for (i = 0; i < next; i++) {
                struct mce *m = &mcelog.entry[i];
 
-               if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
-                       err |= copy_to_user(buf, m, sizeof(*m));
-                       smp_rmb();
-                       buf += sizeof(*m);
-                       memset(m, 0, sizeof(*m));
-               }
+               err |= copy_to_user(buf, m, sizeof(*m));
+               buf += sizeof(*m);
        }
 
+       memset(mcelog.entry, 0, next * sizeof(struct mce));
+       mcelog.next = 0;
+
        if (err)
                err = -EFAULT;
 
 out:
        mutex_unlock(&mce_chrdev_read_mutex);
-       kfree(cpu_tsc);
 
        return err ? err : buf - ubuf;
 }