/*
* Copyright (C) 2007 Oracle. All rights reserved.
+ * Copyright (C) 2014 Fujitsu. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
+#include <linux/workqueue.h>
#include "async-thread.h"
#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3
+#define NO_THRESHOLD (-1)
+#define DFT_THRESHOLD (32)
+
/*
* container for the kthread task pointer and the list of pending work
* One of these is allocated per thread.
struct btrfs_work *work = NULL;
struct list_head *cur = NULL;
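+ /*
+ * Prio work must be taken first: without the immediate goto out
+ * below, cur could be overwritten by an item from the plain head
+ * list and a non-prio item would run ahead of a prio one.
+ */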
- if (!list_empty(prio_head))
+ if (!list_empty(prio_head)) {
cur = prio_head->next;
+ goto out;
+ }
smp_mb();
if (!list_empty(&worker->prio_pending))
goto refill;
- if (!list_empty(head))
+ if (!list_empty(head)) {
cur = head->next;
-
- if (cur)
goto out;
+ }
refill:
spin_lock_irq(&worker->lock);
wake_up_process(worker->task);
spin_unlock_irqrestore(&worker->lock, flags);
}
+
+struct __btrfs_workqueue_struct {
+ struct workqueue_struct *normal_wq;
+ /* List head pointing to ordered work list */
+ struct list_head ordered_list;
+
+ /* Spinlock for ordered_list */
+ spinlock_t list_lock;
+
+ /* Thresholding related variables */
+ atomic_t pending;
+ int max_active;
+ int current_max;
+ int thresh;
+ unsigned int count;
+ spinlock_t thres_lock;
+};
+
+struct btrfs_workqueue_struct {
+ struct __btrfs_workqueue_struct *normal;
+ struct __btrfs_workqueue_struct *high;
+};
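+
+/*
+ * btrfs_workqueue_struct is a thin wrapper pairing two
+ * __btrfs_workqueue_struct instances: "normal" is always present, while
+ * "high" exists only when the queue is created with WQ_HIGHPRI.  Work
+ * items flagged with WORK_HIGH_PRIO_BIT are routed to "high" by
+ * btrfs_queue_work() below.  Each wrapped queue owns one kernel
+ * workqueue plus its own ordered list and thresholding state.
+ */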
+
+static inline struct __btrfs_workqueue_struct
+*__btrfs_alloc_workqueue(char *name, int flags, int max_active, int thresh)
+{
+ struct __btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
+
+ if (unlikely(!ret))
+ return NULL;
+
+ ret->max_active = max_active;
+ atomic_set(&ret->pending, 0);
+ if (thresh == 0)
+ thresh = DFT_THRESHOLD;
+ /* For a low threshold, disabling thresholding is the better choice */
+ if (thresh < DFT_THRESHOLD) {
+ ret->current_max = max_active;
+ ret->thresh = NO_THRESHOLD;
+ } else {
+ ret->current_max = 1;
+ ret->thresh = thresh;
+ }
+
+ if (flags & WQ_HIGHPRI)
+ ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
+ ret->max_active,
+ "btrfs", name);
+ else
+ ret->normal_wq = alloc_workqueue("%s-%s", flags,
+ ret->max_active, "btrfs",
+ name);
+ if (unlikely(!ret->normal_wq)) {
+ kfree(ret);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&ret->ordered_list);
+ spin_lock_init(&ret->list_lock);
+ spin_lock_init(&ret->thres_lock);
+ return ret;
+}
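+
+/*
+ * Concrete examples of the thresh handling above:
+ *
+ *	thresh == 0:  use DFT_THRESHOLD (32); current_max starts at 1 and
+ *		      thresh_exec_hook() later steers the limit within
+ *		      [1, max_active].
+ *	thresh == 8:  below DFT_THRESHOLD, so thresholding is disabled:
+ *		      thresh = NO_THRESHOLD and the queue simply runs
+ *		      with max_active.
+ *	thresh == 64: kept as-is; current_max starts at 1.
+ */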
+
+static inline void
+__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq);
+
+struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
+ int flags,
+ int max_active,
+ int thresh)
+{
+ struct btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
+
+ if (unlikely(!ret))
+ return NULL;
+
+ ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
+ max_active, thresh);
+ if (unlikely(!ret->normal)) {
+ kfree(ret);
+ return NULL;
+ }
+
+ if (flags & WQ_HIGHPRI) {
+ ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
+ thresh);
+ if (unlikely(!ret->high)) {
+ __btrfs_destroy_workqueue(ret->normal);
+ kfree(ret);
+ return NULL;
+ }
+ }
+ return ret;
+}
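+
+/*
+ * Illustrative usage only (the name "worker" and the numbers below are
+ * made-up values, not taken from this patch):
+ *
+ *	struct btrfs_workqueue_struct *wq;
+ *
+ *	wq = btrfs_alloc_workqueue("worker", WQ_UNBOUND | WQ_HIGHPRI, 16, 0);
+ *	if (!wq)
+ *		return -ENOMEM;
+ *
+ * This creates the kernel workqueues "btrfs-worker" and
+ * "btrfs-worker-high", each capped at 16 active items, with the default
+ * threshold of 32 applied to both.
+ */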
+
+/*
+ * Hook for the thresholding mechanism, called from btrfs_queue_work.
+ * This hook WILL be called in IRQ handler context,
+ * so workqueue_set_max_active MUST NOT be called from it.
+ */
+static inline void thresh_queue_hook(struct __btrfs_workqueue_struct *wq)
+{
+ if (wq->thresh == NO_THRESHOLD)
+ return;
+ atomic_inc(&wq->pending);
+}
+
+/*
+ * Hook for the thresholding mechanism, called before executing the work.
+ * This hook runs in kthread context, so it is safe to call
+ * workqueue_set_max_active here.
+ */
+static inline void thresh_exec_hook(struct __btrfs_workqueue_struct *wq)
+{
+ int new_max_active;
+ long pending;
+ int need_change = 0;
+
+ if (wq->thresh == NO_THRESHOLD)
+ return;
+
+ atomic_dec(&wq->pending);
+ spin_lock(&wq->thres_lock);
+ /*
+ * Use wq->count to limit the calling frequency of
+ * workqueue_set_max_active: only re-evaluate the limit once every
+ * wq->thresh / 4 executions.
+ */
+ wq->count++;
+ wq->count %= (wq->thresh / 4);
+ if (wq->count)
+ goto out;
+ new_max_active = wq->current_max;
+
+ /*
+ * pending may change under us, but that is fine: the calculation
+ * of new_max_active does not need a perfectly accurate value.
+ */
+ pending = atomic_read(&wq->pending);
+ if (pending > wq->thresh)
+ new_max_active++;
+ if (pending < wq->thresh / 2)
+ new_max_active--;
+ new_max_active = clamp_val(new_max_active, 1, wq->max_active);
+ if (new_max_active != wq->current_max) {
+ need_change = 1;
+ wq->current_max = new_max_active;
+ }
+out:
+ spin_unlock(&wq->thres_lock);
+
+ if (need_change)
+ workqueue_set_max_active(wq->normal_wq, wq->current_max);
+}
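+
+/*
+ * Worked example of the adjustment above, with thresh = 32 and
+ * max_active = 16: the limit is re-evaluated once every 8 executions
+ * (thresh / 4).  If more than 32 works are pending at that point,
+ * current_max is raised by one; if fewer than 16 (thresh / 2) are
+ * pending, it is lowered by one.  Either way it stays clamped to
+ * [1, 16], and workqueue_set_max_active() is only called when the
+ * value actually changed.
+ */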
+
+static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
+{
+ struct list_head *list = &wq->ordered_list;
+ struct btrfs_work_struct *work;
+ spinlock_t *lock = &wq->list_lock;
+ unsigned long flags;
+
+ while (1) {
+ spin_lock_irqsave(lock, flags);
+ if (list_empty(list))
+ break;
+ work = list_entry(list->next, struct btrfs_work_struct,
+ ordered_list);
+ if (!test_bit(WORK_DONE_BIT, &work->flags))
+ break;
+
+ /*
+ * we are going to call the ordered done function, but
+ * we leave the work item on the list as a barrier so
+ * that later work items that are done don't have their
+ * functions called before this one returns
+ */
+ if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
+ break;
+ spin_unlock_irqrestore(lock, flags);
+ work->ordered_func(work);
+
+ /* now take the lock again and drop our item from the list */
+ spin_lock_irqsave(lock, flags);
+ list_del(&work->ordered_list);
+ spin_unlock_irqrestore(lock, flags);
+
+ /*
+ * we don't want to call the ordered free functions
+ * with the lock held though
+ */
+ work->ordered_free(work);
+ }
+ spin_unlock_irqrestore(lock, flags);
+}
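+
+/*
+ * Example of the ordering guarantee above: if works A, B and C were
+ * queued in that order and B and C happen to finish first, their
+ * ordered_func calls still wait, because unfinished A sits at the head
+ * of ordered_list and the WORK_DONE_BIT test breaks out of the loop.
+ * Once A completes, whichever worker enters this function drains A, B
+ * and C in queue order.
+ */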
+
+static void normal_work_helper(struct work_struct *arg)
+{
+ struct btrfs_work_struct *work;
+ struct __btrfs_workqueue_struct *wq;
+ int need_order = 0;
+
+ work = container_of(arg, struct btrfs_work_struct, normal_work);
+ /*
+ * We should not touch things inside work in the following cases:
+ * 1) after work->func() if it has no ordered_free
+ * Since the struct is freed in work->func().
+ * 2) after setting WORK_DONE_BIT
+ * The work may be freed in other threads almost instantly.
+ * So we save the needed things here.
+ */
+ if (work->ordered_func)
+ need_order = 1;
+ wq = work->wq;
+
+ thresh_exec_hook(wq);
+ work->func(work);
+ if (need_order) {
+ set_bit(WORK_DONE_BIT, &work->flags);
+ run_ordered_work(wq);
+ }
+}
+
+void btrfs_init_work(struct btrfs_work_struct *work,
+ void (*func)(struct btrfs_work_struct *),
+ void (*ordered_func)(struct btrfs_work_struct *),
+ void (*ordered_free)(struct btrfs_work_struct *))
+{
+ work->func = func;
+ work->ordered_func = ordered_func;
+ work->ordered_free = ordered_free;
+ INIT_WORK(&work->normal_work, normal_work_helper);
+ INIT_LIST_HEAD(&work->ordered_list);
+ work->flags = 0;
+}
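+
+/*
+ * Illustrative usage (struct my_async_item, my_func and item are
+ * hypothetical names, not part of this patch):
+ *
+ *	struct my_async_item {
+ *		struct btrfs_work_struct work;
+ *		int payload;
+ *	};
+ *
+ *	static void my_func(struct btrfs_work_struct *work)
+ *	{
+ *		struct my_async_item *item =
+ *			container_of(work, struct my_async_item, work);
+ *		... process item->payload, then free item ...
+ *	}
+ *
+ *	btrfs_init_work(&item->work, my_func, NULL, NULL);
+ *	btrfs_queue_work(wq, &item->work);
+ *
+ * Passing NULL for ordered_func and ordered_free yields a plain,
+ * unordered work item; see __btrfs_queue_work() below.
+ */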
+
+static inline void __btrfs_queue_work(struct __btrfs_workqueue_struct *wq,
+ struct btrfs_work_struct *work)
+{
+ unsigned long flags;
+
+ work->wq = wq;
+ thresh_queue_hook(wq);
+ if (work->ordered_func) {
+ spin_lock_irqsave(&wq->list_lock, flags);
+ list_add_tail(&work->ordered_list, &wq->ordered_list);
+ spin_unlock_irqrestore(&wq->list_lock, flags);
+ }
+ queue_work(wq->normal_wq, &work->normal_work);
+}
+
+void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
+ struct btrfs_work_struct *work)
+{
+ struct __btrfs_workqueue_struct *dest_wq;
+
+ if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
+ dest_wq = wq->high;
+ else
+ dest_wq = wq->normal;
+ __btrfs_queue_work(dest_wq, work);
+}
+
+static inline void
+__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq)
+{
+ destroy_workqueue(wq->normal_wq);
+ kfree(wq);
+}
+
+void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
+{
+ if (!wq)
+ return;
+ if (wq->high)
+ __btrfs_destroy_workqueue(wq->high);
+ __btrfs_destroy_workqueue(wq->normal);
+}
+
+void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
+{
+ wq->normal->max_active = max;
+ if (wq->high)
+ wq->high->max_active = max;
+}
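+
+/*
+ * Note on btrfs_workqueue_set_max() above: it only updates the ceiling
+ * that thresh_exec_hook() clamps against, so the underlying kernel
+ * workqueue limit changes the next time that hook adjusts current_max.
+ * For queues created with thresh < DFT_THRESHOLD (NO_THRESHOLD), the
+ * hook returns early and the new max is never propagated.
+ */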
+
+void btrfs_set_work_high_priority(struct btrfs_work_struct *work)
+{
+ set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
+}
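+
+/*
+ * Illustrative usage (continuing the hypothetical item from the
+ * btrfs_init_work() example above): set the flag before queueing so
+ * btrfs_queue_work() routes the work to the high priority queue when
+ * one exists:
+ *
+ *	btrfs_set_work_high_priority(&item->work);
+ *	btrfs_queue_work(wq, &item->work);
+ */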