/*
 * Task queue shim layer: maps the Solaris taskq API onto Linux workqueues.
 */
#include <sys/taskq.h>

#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif

#define DEBUG_SUBSYSTEM S_TASKQ

/*
 * Task queue interface
 *
 * The taskq_work_wrapper functions are used to manage the work_structs
 * which must be submitted to linux.  The shim layer allocates a wrapper
 * structure for every dispatched item; it contains the real work to be
 * performed along with its private argument.  When the work item runs,
 * the generic handler is called, which invokes the real work function
 * and then frees the wrapper (and its embedded work_struct) using the
 * pointer passed to the handler.
 */
typedef struct taskq_work_wrapper {
	struct work_struct tww_work;	/* Linux work item submitted to the wq */
	task_func_t tww_func;		/* real function to run */
	void * tww_priv;		/* opaque argument passed to tww_func */
} taskq_work_wrapper_t;
24 | ||
25 | static void | |
26 | taskq_work_handler(void *priv) | |
27 | { | |
28 | taskq_work_wrapper_t *tww = priv; | |
29 | ||
937879f1 | 30 | ASSERT(tww); |
31 | ASSERT(tww->tww_func); | |
f1ca4da6 | 32 | |
33 | /* Call the real function and free the wrapper */ | |
34 | tww->tww_func(tww->tww_priv); | |
35 | kfree(tww); | |
36 | } | |
37 | ||
38 | /* XXX - All flags currently ignored */ | |
39 | taskqid_t | |
40 | __taskq_dispatch(taskq_t *tq, task_func_t func, void *priv, uint_t flags) | |
41 | { | |
42 | struct workqueue_struct *wq = tq; | |
43 | taskq_work_wrapper_t *tww; | |
44 | int rc; | |
937879f1 | 45 | ENTRY; |
f1ca4da6 | 46 | |
937879f1 | 47 | ASSERT(tq); |
48 | ASSERT(func); | |
f1ca4da6 | 49 | |
0a6fd143 | 50 | /* Use GFP_ATOMIC since this may be called in interrupt context */ |
51 | tww = (taskq_work_wrapper_t *)kmalloc(sizeof(*tww), GFP_ATOMIC); | |
f1ca4da6 | 52 | if (!tww) |
937879f1 | 53 | RETURN((taskqid_t)0); |
f1ca4da6 | 54 | |
55 | INIT_WORK(&(tww->tww_work), taskq_work_handler, tww); | |
56 | tww->tww_func = func; | |
57 | tww->tww_priv = priv; | |
58 | ||
59 | rc = queue_work(wq, &(tww->tww_work)); | |
60 | if (!rc) { | |
61 | kfree(tww); | |
937879f1 | 62 | RETURN((taskqid_t)0); |
f1ca4da6 | 63 | } |
64 | ||
937879f1 | 65 | RETURN((taskqid_t)wq); |
f1ca4da6 | 66 | } |
f1b59d26 | 67 | EXPORT_SYMBOL(__taskq_dispatch); |
f1ca4da6 | 68 | |
6e605b6e | 69 | /* XXX - We must fully implement dynamic workqueues since they make a |
70 | * significant impact in terms of performance. For now I've made | |
71 | * a trivial compromise. If you ask for one thread you get one | |
72 | * thread, if you ask for more than that you get one per core. | |
73 | * It's unclear if you ever really need/want more than one per-core | |
74 | * anyway. More analysis is required. | |
75 | * | |
76 | * name - Workqueue names are limited to 10 chars | |
f1ca4da6 | 77 | * pri - Ignore priority |
78 | * min - Ignored until this is a dynamic thread pool | |
79 | * max - Ignored until this is a dynamic thread pool | |
80 | * flags - Ignored until this is a dynamic thread_pool | |
81 | */ | |
82 | taskq_t * | |
83 | __taskq_create(const char *name, int nthreads, pri_t pri, | |
84 | int minalloc, int maxalloc, uint_t flags) | |
85 | { | |
6e605b6e | 86 | taskq_t *tq; |
937879f1 | 87 | ENTRY; |
6e605b6e | 88 | |
89 | if (nthreads == 1) | |
90 | tq = create_singlethread_workqueue(name); | |
91 | else | |
92 | tq = create_workqueue(name); | |
93 | ||
94 | return tq; | |
f1ca4da6 | 95 | } |
f1b59d26 | 96 | EXPORT_SYMBOL(__taskq_create); |
b123971f | 97 | |
/*
 * Destroy taskq 'tq' by tearing down the underlying Linux workqueue.
 * Any work items still queued are flushed by destroy_workqueue() before
 * the threads exit.  'tq' must not be used after this call.
 */
void
__taskq_destroy(taskq_t *tq)
{
	ENTRY;
	destroy_workqueue(tq);
	EXIT;
}
EXPORT_SYMBOL(__taskq_destroy);
106 | ||
/*
 * Block until all work items previously dispatched to 'tq' have
 * completed, by flushing the underlying Linux workqueue.  Does not
 * prevent new work from being queued concurrently.
 */
void
__taskq_wait(taskq_t *tq)
{
	ENTRY;
	flush_workqueue(tq);
	EXIT;
}
EXPORT_SYMBOL(__taskq_wait);