/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
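
/*
 * A minimal usage sketch, kept out of the build like the cancel stub
 * below.  It is not part of the original file: the devices, the page
 * count, the 512KB region and the completion handler are hypothetical,
 * chosen only to illustrate the interface exported at the bottom of
 * this file (assumes <linux/completion.h>).
 */
#if 0
static void example_copy_done(int read_err, unsigned long write_err,
			      void *context)
{
	/* write_err carries one bit per failed destination region. */
	if (read_err || write_err)
		printk(KERN_ERR "example copy failed\n");
	complete((struct completion *) context);
}

static int example_copy(struct block_device *src_bdev,
			struct block_device *dst_bdev)
{
	struct kcopyd_client *kc;
	struct dm_io_region from, to;
	struct completion done;
	int r;

	r = kcopyd_client_create(32, &kc);	/* 32 preallocated pages */
	if (r)
		return r;

	from.bdev = src_bdev;
	from.sector = 0;
	from.count = 1024;			/* 512KB in 512-byte sectors */

	to.bdev = dst_bdev;
	to.sector = 0;
	to.count = from.count;

	init_completion(&done);
	r = kcopyd_copy(kc, &from, 1, &to, 0, example_copy_done, &done);
	if (!r)
		wait_for_completion(&done);

	kcopyd_client_destroy(kc);
	return r;
}
#endif /* 0 */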

#include <asm/types.h>
#include <asm/atomic.h>

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

#include "kcopyd.h"
#include "dm.h"

static struct workqueue_struct *_kcopyd_wq;
static struct work_struct _kcopyd_work;

static void wake(void)
{
	queue_work(_kcopyd_wq, &_kcopyd_work);
}

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct kcopyd_client {
	struct list_head list;

	spinlock_t lock;
	struct page_list *pages;
	unsigned int nr_pages;
	unsigned int nr_free_pages;

	struct dm_io_client *io_client;

	wait_queue_head_t destroyq;
	atomic_t nr_jobs;
};

static struct page_list *alloc_pl(void)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return NULL;

	pl->page = alloc_page(GFP_KERNEL);
	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}

static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}

static int kcopyd_get_pages(struct kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{
	struct page_list *pl;

	spin_lock(&kc->lock);
	if (kc->nr_free_pages < nr) {
		spin_unlock(&kc->lock);
		return -ENOMEM;
	}

	kc->nr_free_pages -= nr;
	for (*pages = pl = kc->pages; --nr; pl = pl->next)
		;

	kc->pages = pl->next;
	pl->next = NULL;

	spin_unlock(&kc->lock);

	return 0;
}

static void kcopyd_put_pages(struct kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *cursor;

	spin_lock(&kc->lock);
	for (cursor = pl; cursor->next; cursor = cursor->next)
		kc->nr_free_pages++;

	kc->nr_free_pages++;
	cursor->next = kc->pages;
	kc->pages = pl;
	spin_unlock(&kc->lock);
}

/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}

static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
{
	unsigned int i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr; i++) {
		next = alloc_pl();
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kcopyd_put_pages(kc, pl);
	kc->nr_pages += nr;
	return 0;
}

static void client_free_pages(struct kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd;
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
	struct kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.  write_err accumulates one bit per
	 * failed destination region (see complete_io() below).
	 */
	int read_err;
	unsigned long write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct dm_io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct dm_io_region dests[KCOPYD_MAX_REGIONS];

	sector_t offset;
	unsigned int nr_pages;
	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for the callback to use.
	 */
	kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct mutex lock;
	atomic_t sub_jobs;
	sector_t progress;
};

/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512

static struct kmem_cache *_job_cache;
static mempool_t *_job_pool;

/*
 * We maintain three lists of jobs:
 *
 * i)   jobs waiting for pages
 * ii)  jobs that have pages, and are waiting for the io to be issued.
 * iii) jobs that have completed.
 *
 * All three of these are protected by _job_lock.
 */
static DEFINE_SPINLOCK(_job_lock);

static LIST_HEAD(_complete_jobs);
static LIST_HEAD(_io_jobs);
static LIST_HEAD(_pages_jobs);
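
/*
 * How a job moves between these lists (a summary added here for
 * clarity; see do_work() and complete_io() below):
 *
 *	kcopyd_copy()/dispatch_job() -> _pages_jobs
 *	_pages_jobs  --(pages allocated)-->          _io_jobs (rw == READ)
 *	_io_jobs     --(read complete, rw = WRITE)-> _io_jobs
 *	_io_jobs     --(writes complete)-->          _complete_jobs
 *	_complete_jobs -> client's notify fn runs, job is freed
 */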

static int jobs_init(void)
{
	_job_cache = KMEM_CACHE(kcopyd_job, 0);
	if (!_job_cache)
		return -ENOMEM;

	_job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
	if (!_job_pool) {
		kmem_cache_destroy(_job_cache);
		return -ENOMEM;
	}

	return 0;
}

static void jobs_exit(void)
{
	BUG_ON(!list_empty(&_complete_jobs));
	BUG_ON(!list_empty(&_io_jobs));
	BUG_ON(!list_empty(&_pages_jobs));

	mempool_destroy(_job_pool);
	kmem_cache_destroy(_job_cache);
	_job_pool = NULL;
	_job_cache = NULL;
}

/*
 * Functions to push a job onto the tail of a given job list,
 * and to pop one off its head.
 */
static struct kcopyd_job *pop(struct list_head *jobs)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&_job_lock, flags);

	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&_job_lock, flags);

	return job;
}

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;

	spin_lock_irqsave(&_job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&_job_lock, flags);
}

/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	kcopyd_notify_fn fn = job->fn;
	struct kcopyd_client *kc = job->kc;

	kcopyd_put_pages(kc, job->pages);
	mempool_free(job, _job_pool);
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	return 0;
}

static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;

	if (error) {
		if (job->rw == WRITE)
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&_complete_jobs, job);
			wake();
			return;
		}
	}

	if (job->rw == WRITE)
		push(&_complete_jobs, job);

	else {
		job->rw = WRITE;
		push(&_io_jobs, job);
	}

	wake();
}

/*
 * Issue the io for a particular job (the whole source read, or the
 * writes to every destination) via dm-io; complete_io() runs when
 * it finishes.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = job->rw,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = job->offset,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	if (job->rw == READ)
		r = dm_io(&io_req, 1, &job->source, NULL);
	else
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);

	return r;
}

static int run_pages_job(struct kcopyd_job *job)
{
	int r;

	job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
				  PAGE_SIZE >> 9);
	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
		push(&_io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}
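
/*
 * Worked example for the page calculation above (assuming 4K pages):
 * PAGE_SIZE >> 9 is the number of 512-byte sectors per page, i.e. 8,
 * so a destination of 1024 sectors with a zero page offset needs
 * dm_div_up(1024, 8) = 128 pages.
 */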

/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs))) {

		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (job->rw == WRITE)
				job->write_err = (unsigned long) -1L;
			else
				job->read_err = 1;
			push(&_complete_jobs, job);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push(jobs, job);
			break;
		}

		count++;
	}

	return count;
}

/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *ignored)
{
	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 */
	process_jobs(&_complete_jobs, run_complete_job);
	process_jobs(&_pages_jobs, run_pages_job);
	process_jobs(&_io_jobs, run_io_job);
}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	atomic_inc(&job->kc->nr_jobs);
	push(&_pages_jobs, job);
	wake();
}

#define SUB_JOB_SIZE 128
static void segment_complete(int read_err, unsigned long write_err,
			     void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *job = (struct kcopyd_job *) context;

	mutex_lock(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > SUB_JOB_SIZE)
				count = SUB_JOB_SIZE;

			job->progress += count;
		}
	}
	mutex_unlock(&job->lock);

	if (count) {
		int i;
		struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);

		*sub_job = *job;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * To avoid a race we must keep the job around
		 * until after the notify function has completed.
		 * Otherwise the client may try and stop the job
		 * after we've completed.
		 */
		job->fn(read_err, write_err, job->context);
		mempool_free(job, _job_pool);
	}
}

/*
 * Create some little jobs that will, between them, do the move.
 */
#define SPLIT_COUNT 8
static void split_job(struct kcopyd_job *job)
{
	int i;

	atomic_set(&job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++)
		segment_complete(0, 0u, job);
}
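
/*
 * Worked example (assuming 512-byte sectors): SUB_JOB_SIZE is 64KB,
 * so a 16MB copy (32768 sectors) proceeds as 256 sub-jobs, with at
 * most SPLIT_COUNT (8) of them in flight at once; each call to
 * segment_complete() dispatches the next chunk until job->progress
 * reaches job->source.count.
 */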

int kcopyd_copy(struct kcopyd_client *kc, struct dm_io_region *from,
		unsigned int num_dests, struct dm_io_region *dests,
		unsigned int flags, kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	/*
	 * Allocate a new job.
	 */
	job = mempool_alloc(_job_pool, GFP_NOIO);

	/*
	 * Set up for the read.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;
	job->rw = READ;

	job->source = *from;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	job->offset = 0;
	job->nr_pages = 0;
	job->pages = NULL;

	job->fn = fn;
	job->context = context;

	if (job->source.count < SUB_JOB_SIZE)
		dispatch_job(job);

	else {
		mutex_init(&job->lock);
		job->progress = 0;
		split_job(job);
	}

	return 0;
}

/*
 * Cancels a kcopyd job, e.g. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif /* 0 */

/*-----------------------------------------------------------------
 * Unit setup
 *---------------------------------------------------------------*/
static DEFINE_MUTEX(_client_lock);
static LIST_HEAD(_clients);

static void client_add(struct kcopyd_client *kc)
{
	mutex_lock(&_client_lock);
	list_add(&kc->list, &_clients);
	mutex_unlock(&_client_lock);
}

static void client_del(struct kcopyd_client *kc)
{
	mutex_lock(&_client_lock);
	list_del(&kc->list);
	mutex_unlock(&_client_lock);
}

static DEFINE_MUTEX(kcopyd_init_lock);
static int kcopyd_clients = 0;

static int kcopyd_init(void)
{
	int r;

	mutex_lock(&kcopyd_init_lock);

	if (kcopyd_clients) {
		/* Already initialized. */
		kcopyd_clients++;
		mutex_unlock(&kcopyd_init_lock);
		return 0;
	}

	r = jobs_init();
	if (r) {
		mutex_unlock(&kcopyd_init_lock);
		return r;
	}

	_kcopyd_wq = create_singlethread_workqueue("kcopyd");
	if (!_kcopyd_wq) {
		jobs_exit();
		mutex_unlock(&kcopyd_init_lock);
		return -ENOMEM;
	}

	kcopyd_clients++;
	INIT_WORK(&_kcopyd_work, do_work);
	mutex_unlock(&kcopyd_init_lock);
	return 0;
}

static void kcopyd_exit(void)
{
	mutex_lock(&kcopyd_init_lock);
	kcopyd_clients--;
	if (!kcopyd_clients) {
		jobs_exit();
		destroy_workqueue(_kcopyd_wq);
		_kcopyd_wq = NULL;
	}
	mutex_unlock(&kcopyd_init_lock);
}

int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
{
	int r = 0;
	struct kcopyd_client *kc;

	r = kcopyd_init();
	if (r)
		return r;

	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc) {
		kcopyd_exit();
		return -ENOMEM;
	}

	spin_lock_init(&kc->lock);
	kc->pages = NULL;
	kc->nr_pages = kc->nr_free_pages = 0;
	r = client_alloc_pages(kc, nr_pages);
	if (r) {
		kfree(kc);
		kcopyd_exit();
		return r;
	}

	kc->io_client = dm_io_client_create(nr_pages);
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);
		client_free_pages(kc);
		kfree(kc);
		kcopyd_exit();
		return r;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	client_add(kc);
	*result = kc;
	return 0;
}

void kcopyd_client_destroy(struct kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
	client_del(kc);
	kfree(kc);
	kcopyd_exit();
}

EXPORT_SYMBOL(kcopyd_client_create);
EXPORT_SYMBOL(kcopyd_client_destroy);
EXPORT_SYMBOL(kcopyd_copy);