/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Some portions of this software may have been derived from
 * https://github.com/halayli/lthread, which carries the following license.
 *
 * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/**
 * @file lthread_api.h
 *
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * This file contains the public API for the L-thread subsystem.
 *
 * The L-thread subsystem provides a simple cooperative scheduler to
 * enable arbitrary functions to run as cooperative threads within a
 * single P-thread.
 *
 * The subsystem provides a P-thread-like API that is intended to assist in
 * the reuse of legacy code written for POSIX P-threads.
 *
 * The L-thread subsystem relies on cooperative multitasking; as such, an
 * L-thread must reach rescheduling points frequently. Often these
 * rescheduling points are provided transparently when the application
 * invokes an L-thread API.
 *
 * In some applications the program may enter a loop whose exit condition
 * depends on the action of another thread or on a response from hardware.
 * In such a case it is necessary to yield the thread periodically in the
 * loop body, to allow other threads an opportunity to run. This can be done
 * by inserting a call to lthread_yield() or lthread_sleep(n) in the body of
 * the loop.
 *
 * If the application makes expensive / blocking system calls or does other
 * work that would take an inordinate amount of time to complete, this will
 * stall the cooperative scheduler, resulting in very poor performance.
 *
 * In such cases an L-thread can be migrated temporarily to another scheduler
 * running in a different P-thread on another core. When the expensive or
 * blocking operation is completed, it can be migrated back to the original
 * scheduler. In this way other threads can continue to run on the original
 * scheduler and will be completely unaffected by the blocking behaviour.
 * To migrate an L-thread to another scheduler the API lthread_set_affinity()
 * is provided.
 *
 * If L-threads that share data are running on the same core, it is possible
 * to design programs where mutual exclusion mechanisms to protect shared data
 * can be avoided, because cooperative threads cannot preempt each other.
 *
 * There are two cases where mutual exclusion mechanisms are necessary:
 *
 *  a) Where the L-threads sharing data are running on different cores.
 *  b) Where code must yield while updating data shared with another thread.
 *
 * The L-thread subsystem provides a set of mutex APIs to help with such
 * scenarios; however, excessive reliance on these will impact performance
 * and is best avoided.
 *
 * L-threads can synchronise using a fast condition variable implementation
 * that supports signal and broadcast. An L-thread running on any core can
 * wait on a condition.
 *
 * L-threads can have L-thread-local storage, with an API modelled either on
 * the P-thread get/set-specific API or on PER_LTHREAD macros modelled on the
 * RTE_PER_LCORE macros. Alternatively, a simple user data pointer may be set
 * and retrieved from a thread.
 */
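
/**
 * Illustrative sketch (not part of the original header): a polling loop given
 * an explicit rescheduling point, as described above. The function and the
 * flag variable are hypothetical; only lthread_yield() and lthread_exit()
 * come from this API.
 *
 * @code
 *	static void wait_for_flag(void *arg)
 *	{
 *		volatile int *flag = arg;
 *
 *		while (*flag == 0)
 *			lthread_yield();	// let other lthreads run
 *
 *		lthread_exit(NULL);
 *	}
 * @endcode
 */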
#ifndef LTHREAD_H
#define LTHREAD_H

#include <stdint.h>
#include <sys/socket.h>
#include <fcntl.h>
#include <netinet/in.h>

#include <rte_cycles.h>


struct lthread;
struct lthread_cond;
struct lthread_mutex;

struct lthread_condattr;
struct lthread_mutexattr;

typedef void (*lthread_func_t) (void *);

/*
 * Define the stack size for an lthread.
 * This is the size that will be allocated on lthread creation.
 * The stack is a fixed size and will not grow.
 */
#define LTHREAD_MAX_STACK_SIZE (1024*64)

/**
 * Define the maximum number of TLS keys that can be created.
 */
#define LTHREAD_MAX_KEYS 1024

/**
 * Define the maximum number of attempts to destroy an lthread's
 * TLS data on thread exit.
 */
#define LTHREAD_DESTRUCTOR_ITERATIONS 4


/**
 * Define the maximum number of lcores that will support lthreads.
 */
#define LTHREAD_MAX_LCORES RTE_MAX_LCORE

/**
 * How many lthread objects to pre-allocate as the system grows.
 * This applies to lthreads and their stacks, TLS, mutexes and condition
 * variables.
 *
 * @see _lthread_alloc()
 * @see _cond_alloc()
 * @see _mutex_alloc()
 */
#define LTHREAD_PREALLOC 100

/**
 * Set the number of schedulers in the system.
 *
 * This function may optionally be called before starting schedulers.
 *
 * If the number of schedulers is not set, or is set to 0, then each scheduler
 * will begin scheduling lthreads as soon as it is started.
 *
 * If the number of schedulers is set to greater than 0, then each scheduler
 * will wait until all schedulers have started before beginning to schedule
 * lthreads.
 *
 * If an application wishes to have threads migrate between cores using
 * lthread_set_affinity(), or join threads running on other cores using
 * lthread_join(), then it is prudent to set the number of schedulers to ensure
 * that all schedulers are initialised beforehand.
 *
 * @param num
 *  the number of schedulers in the system
 * @return
 *  the number of schedulers in the system
 */
int lthread_num_schedulers_set(int num);

/**
 * Return the number of schedulers currently running.
 *
 * @return
 *  the number of schedulers in the system
 */
int lthread_active_schedulers(void);

/**
 * Shut down the specified scheduler
 *
 * This function tells the specified scheduler to
 * exit if/when there is no more work to do.
 *
 * Note that although the scheduler will stop,
 * its resources are not freed.
 *
 * @param lcore
 *  The lcore of the scheduler to shut down
 *
 * @return
 *  none
 */
void lthread_scheduler_shutdown(unsigned lcore);

/**
 * Shut down all schedulers
 *
 * This function tells all schedulers, including the current scheduler, to
 * exit if/when there is no more work to do.
 *
 * Note that although the schedulers will stop,
 * their resources are not freed.
 *
 * @return
 *  none
 */
void lthread_scheduler_shutdown_all(void);

/**
 * Run the lthread scheduler
 *
 * Runs the lthread scheduler.
 * This function returns only if/when all lthreads have exited.
 * This function must be the main loop of an EAL thread.
 *
 * @return
 *  none
 */
void lthread_run(void);

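/**
 * Illustrative sketch (not part of the original header): one possible start-up
 * sequence. Every participating lcore runs sched_main() as its main loop; the
 * first lcore also seeds the system with an initial lthread. The names
 * sched_main and initial_lthread are hypothetical, and the per-lcore launch
 * (here DPDK's rte_eal_mp_remote_launch()) could be any mechanism that starts
 * sched_main on each EAL thread.
 *
 * @code
 *	static void initial_lthread(void *arg)
 *	{
 *		// create further lthreads here ...
 *		lthread_exit(NULL);
 *	}
 *
 *	static int sched_main(void *arg)
 *	{
 *		struct lthread *lt;
 *
 *		if (rte_lcore_id() == 0)
 *			lthread_create(&lt, -1, initial_lthread, NULL);
 *		lthread_run();	// returns when all lthreads have exited
 *		return 0;
 *	}
 *
 *	// in main(), after rte_eal_init():
 *	//	lthread_num_schedulers_set(rte_lcore_count());
 *	//	rte_eal_mp_remote_launch(sched_main, NULL, CALL_MASTER);
 * @endcode
 */
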
/**
 * Create an lthread
 *
 * Creates an lthread and places it in the ready queue on a particular
 * lcore.
 *
 * If no scheduler exists yet on the current lcore then one is created.
 *
 * @param new_lt
 *  Pointer to an lthread pointer that will be initialized
 * @param lcore
 *  The lcore the thread should be started on:
 *    -1 the current lcore
 *    0 - LTHREAD_MAX_LCORES any other lcore
 * @param func
 *  Pointer to the function for the thread to run
 * @param arg
 *  Pointer to args that will be passed to the thread
 *
 * @return
 *  0 success
 *  EAGAIN no resources available
 *  EINVAL NULL thread or function pointer, or lcore_id out of range
 */
int
lthread_create(struct lthread **new_lt,
	       int lcore, lthread_func_t func, void *arg);

/**
 * Cancel an lthread
 *
 * Cancels an lthread and causes it to be terminated.
 * If the lthread is detached it will be freed immediately,
 * otherwise its resources will not be released until it is joined.
 *
 * @param lt
 *  Pointer to the lthread to be cancelled
 *
 * @return
 *  0 success
 *  EINVAL thread was NULL
 */
int lthread_cancel(struct lthread *lt);

/**
 * Join an lthread
 *
 * Joins the current thread with the specified lthread, and waits for that
 * thread to exit.
 * Passes an optional pointer to collect returned data.
 *
 * @param lt
 *  Pointer to the lthread to be joined
 * @param ptr
 *  Pointer to a pointer to collect returned data
 *
 * @return
 *  0 success
 *  EINVAL lthread could not be joined
 */
int lthread_join(struct lthread *lt, void **ptr);

/**
 * Detach an lthread
 *
 * Detaches the current thread.
 * On exit a detached lthread will be freed immediately and will not wait
 * to be joined. The default state for a thread is not detached.
 *
 * @return
 *  none
 */
void lthread_detach(void);

/**
 * Exit an lthread
 *
 * Terminates the current thread, optionally returning data.
 * The data may be collected by lthread_join().
 *
 * After calling this function the lthread will be suspended until it is
 * joined. After it is joined its resources will be freed.
 *
 * @param val
 *  Pointer to data to be returned
 *
 * @return
 *  none
 */
void lthread_exit(void *val);

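/**
 * Illustrative sketch (not part of the original header): creating an lthread,
 * returning a value from it with lthread_exit(), and collecting that value in
 * lthread_join(). worker() and parent() are hypothetical.
 *
 * @code
 *	static void worker(void *arg)
 *	{
 *		// ... do some work ...
 *		lthread_exit(arg);	// value handed back to the joiner
 *	}
 *
 *	static void parent(void *arg)
 *	{
 *		struct lthread *lt;
 *		void *ret;
 *
 *		lthread_create(&lt, -1, worker, (void *)"done");
 *		lthread_join(lt, &ret);	// suspends until worker() exits
 *		// ret now points at the string "done"
 *	}
 * @endcode
 */
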
/**
 * Cause the current lthread to sleep for n nanoseconds
 *
 * The current thread will be suspended until the specified time has elapsed
 * or has been exceeded.
 *
 * Execution will switch to the next lthread that is ready to run.
 *
 * @param nsecs
 *  Number of nanoseconds to sleep
 *
 * @return
 *  none
 */
void lthread_sleep(uint64_t nsecs);

/**
 * Cause the current lthread to sleep for n CPU clock ticks
 *
 * The current thread will be suspended until the specified time has elapsed
 * or has been exceeded.
 *
 * Execution will switch to the next lthread that is ready to run.
 *
 * @param clks
 *  Number of clock ticks to sleep
 *
 * @return
 *  none
 */
void lthread_sleep_clks(uint64_t clks);

/**
 * Yield the current lthread
 *
 * The current thread will yield and execution will switch to the
 * next lthread that is ready to run.
 *
 * @return
 *  none
 */
void lthread_yield(void);

/**
 * Migrate the current thread to another scheduler
 *
 * This function migrates the current thread to another scheduler.
 * Execution will switch to the next lthread that is ready to run on the
 * current scheduler. The current thread will be resumed on the new scheduler.
 *
 * @param lcore
 *  The lcore to migrate to
 *
 * @return
 *  0 success, we are now running on the specified core
 *  EINVAL the destination lcore was not valid
 */
int lthread_set_affinity(unsigned lcore);

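/**
 * Illustrative sketch (not part of the original header): temporarily moving
 * the current lthread to a scheduler reserved for blocking work, as suggested
 * in the file-level description. BLOCKING_LCORE is hypothetical;
 * rte_lcore_id() (from rte_lcore.h) is used here to remember the home lcore.
 *
 * @code
 *	static void do_blocking_work(void *arg)
 *	{
 *		unsigned home = rte_lcore_id();
 *
 *		lthread_set_affinity(BLOCKING_LCORE);	// migrate away first
 *		// ... expensive or blocking system call here ...
 *		lthread_set_affinity(home);		// then migrate back
 *	}
 * @endcode
 */
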
/**
 * Return the current lthread
 *
 * Returns the current lthread.
 *
 * @return
 *  pointer to the current lthread
 */
struct lthread
*lthread_current(void);

/**
 * Associate user data with an lthread
 *
 * This function sets a user data pointer in the current lthread.
 * The pointer can be retrieved with lthread_get_data().
 * It is the user's responsibility to allocate and free any data referenced
 * by the user pointer.
 *
 * @param data
 *  pointer to user data
 *
 * @return
 *  none
 */
void lthread_set_data(void *data);

/**
 * Get user data for the current lthread
 *
 * This function returns a user data pointer for the current lthread.
 * The pointer must first be set with lthread_set_data().
 * It is the user's responsibility to allocate and free any data referenced
 * by the user pointer.
 *
 * @return
 *  pointer to user data
 */
void
*lthread_get_data(void);

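/**
 * Illustrative sketch (not part of the original header): attaching a
 * caller-owned context structure to the current lthread with
 * lthread_set_data() and retrieving it later with lthread_get_data().
 * struct my_ctx is hypothetical; malloc()/free() come from <stdlib.h>.
 *
 * @code
 *	struct my_ctx {
 *		int id;
 *	};
 *
 *	static void thread_start(void *arg)
 *	{
 *		struct my_ctx *ctx = malloc(sizeof(*ctx));
 *
 *		ctx->id = 42;
 *		lthread_set_data(ctx);		// the caller owns this memory
 *
 *		// ... later, possibly deep in another function ...
 *		struct my_ctx *back = lthread_get_data();
 *		free(back);			// and the caller must free it
 *	}
 * @endcode
 */
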
struct lthread_key;
typedef void (*tls_destructor_func) (void *);

/**
 * Create a key for lthread TLS
 *
 * This function is modelled on pthread_key_create().
 * It creates a thread-specific data key visible to all lthreads on the
 * current scheduler.
 *
 * Key values may be used to locate thread-specific data.
 * The same key value may be used by different threads; the values bound
 * to the key by lthread_setspecific() are maintained on a per-thread
 * basis and persist for the life of the calling thread.
 *
 * An optional destructor function may be associated with each key value.
 * At thread exit, if a key value has a non-NULL destructor pointer, and the
 * thread has a non-NULL value associated with the key, the function pointed
 * to is called with the current associated value as its sole argument.
 *
 * @param key
 *  Pointer to the key to be created
 * @param destructor
 *  Pointer to destructor function
 *
 * @return
 *  0 success
 *  EINVAL the key ptr was NULL
 *  EAGAIN no resources available
 */
int lthread_key_create(unsigned int *key, tls_destructor_func destructor);

/**
 * Delete a key for lthread TLS
 *
 * This function is modelled on pthread_key_delete().
 * It deletes a thread-specific data key previously returned by
 * lthread_key_create().
 * The thread-specific data values associated with the key need not be NULL
 * at the time that lthread_key_delete() is called.
 * It is the responsibility of the application to free any application
 * storage or perform any cleanup actions for data structures related to the
 * deleted key. This cleanup can be done either before or after
 * lthread_key_delete() is called.
 *
 * @param key
 *  The key to be deleted
 *
 * @return
 *  0 Success
 *  EINVAL the key was invalid
 */
int lthread_key_delete(unsigned int key);

/**
 * Get lthread TLS
 *
 * This function is modelled on pthread_getspecific().
 * It returns the value currently bound to the specified key on behalf of the
 * calling thread. Calling lthread_getspecific() with a key value not
 * obtained from lthread_key_create() or after the key has been deleted with
 * lthread_key_delete() will result in undefined behaviour.
 * lthread_getspecific() may be called from a thread-specific data destructor
 * function.
 *
 * @param key
 *  The key for which data is requested
 *
 * @return
 *  Pointer to the thread-specific data associated with that key
 *  or NULL if no data has been set.
 */
void
*lthread_getspecific(unsigned int key);

/**
 * Set lthread TLS
 *
 * This function is modelled on pthread_setspecific().
 * It associates a thread-specific value with a key obtained via a previous
 * call to lthread_key_create().
 * Different threads may bind different values to the same key. These values
 * are typically pointers to dynamically allocated memory that have been
 * reserved by the calling thread. Calling lthread_setspecific() with a key
 * value not obtained from lthread_key_create() or after the key has been
 * deleted with lthread_key_delete() will result in undefined behaviour.
 *
 * @param key
 *  The key for which data is to be set
 * @param value
 *  Pointer to the user data
 *
 * @return
 *  0 success
 *  EINVAL the key was invalid
 */
int lthread_setspecific(unsigned int key, const void *value);

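/**
 * Illustrative sketch (not part of the original header): per-lthread data via
 * the key-based TLS API. The key is created once, each lthread binds its own
 * buffer to it, and the destructor frees that buffer at lthread exit. All
 * names other than the lthread_* calls are hypothetical.
 *
 * @code
 *	static unsigned int buf_key;
 *
 *	static void buf_destructor(void *p)
 *	{
 *		free(p);	// runs at lthread exit if the bound value != NULL
 *	}
 *
 *	static void setup(void)
 *	{
 *		lthread_key_create(&buf_key, buf_destructor);
 *	}
 *
 *	static void per_thread_work(void *arg)
 *	{
 *		lthread_setspecific(buf_key, malloc(256));
 *
 *		char *buf = lthread_getspecific(buf_key);
 *		// use buf; buf_destructor() releases it when this lthread exits
 *	}
 * @endcode
 */
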
/**
 * The macros below provide an alternative mechanism to access lthread local
 * storage.
 *
 * The macros can be used to declare, define, and access per-lthread local
 * storage in a similar way to the RTE_PER_LCORE macros, which control storage
 * local to an lcore.
 *
 * Memory for per-lthread variables declared in this way is allocated when the
 * lthread is created and a pointer to this memory is stored in the lthread.
 * The per-lthread variables are accessed via the pointer + the offset of the
 * particular variable.
 *
 * The total size of per-lthread storage, and the variable offsets, are found
 * by defining the variables in a unique global memory section, the start and
 * end of which is known. This global memory section is used only in the
 * computation of the addresses of the lthread variables, and is never actually
 * used to store any data.
 *
 * Due to the fact that variables declared this way may be scattered across
 * many files, the start and end of the section and the variable offsets are
 * only known after linking, thus the computation of section size and variable
 * addresses is performed at run time.
 *
 * These macros are primarily provided to aid porting of code that makes use
 * of the existing RTE_PER_LCORE macros. In principle it would be more
 * efficient to gather all lthread local variables into a single structure and
 * set/retrieve a pointer to that struct using the alternative
 * lthread_set_data()/lthread_get_data() APIs.
 *
 * These macros are mutually exclusive with the lthread_set_data()/
 * lthread_get_data() APIs. If you define storage using these macros then the
 * lthread_set_data() API does nothing, and the lthread_get_data() API returns
 * the start of the global section.
 */
/* start and end of per lthread section */
extern char __start_per_lt;
extern char __stop_per_lt;


#define RTE_DEFINE_PER_LTHREAD(type, name)			\
__typeof__(type)__attribute((section("per_lt"))) per_lt_##name

/**
 * Macro to declare an extern per lthread variable "name" of type "type"
 */
#define RTE_DECLARE_PER_LTHREAD(type, name)			\
extern __typeof__(type)__attribute((section("per_lt"))) per_lt_##name

/**
 * Read/write the per-lthread variable value
 */
#define RTE_PER_LTHREAD(name) ((typeof(per_lt_##name) *)\
((char *)lthread_get_data() +\
((char *) &per_lt_##name - &__start_per_lt)))

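/**
 * Illustrative sketch (not part of the original header): declaring and using a
 * per-lthread counter with the macros above, in the style of RTE_PER_LCORE.
 * The variable name counter is hypothetical. RTE_PER_LTHREAD() evaluates to a
 * pointer, so the value is accessed by dereferencing it.
 *
 * @code
 *	RTE_DEFINE_PER_LTHREAD(uint64_t, counter);
 *
 *	static void count_event(void *arg)
 *	{
 *		*RTE_PER_LTHREAD(counter) += 1;	// private to this lthread
 *	}
 * @endcode
 */
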
/**
 * Initialize a mutex
 *
 * This function provides a mutual exclusion device, the need for which
 * can normally be avoided in a cooperative multitasking environment.
 * It is provided to aid porting of legacy code originally written for
 * preemptive multitasking environments such as pthreads.
 *
 * A mutex may be unlocked (not owned by any thread), or locked (owned by
 * one thread).
 *
 * A mutex can never be owned by more than one thread simultaneously.
 * A thread attempting to lock a mutex that is already locked by another
 * thread is suspended until the owning thread unlocks the mutex.
 *
 * lthread_mutex_init() initializes the mutex object pointed to by mutex.
 * Optional mutex attributes specified in mutexattr are reserved for future
 * use and are currently ignored.
 *
 * If a thread calls lthread_mutex_lock() on the mutex, then if the mutex
 * is currently unlocked, it becomes locked and owned by the calling
 * thread, and lthread_mutex_lock() returns immediately. If the mutex is
 * already locked by another thread, lthread_mutex_lock() suspends the calling
 * thread until the mutex is unlocked.
 *
 * lthread_mutex_trylock() behaves identically to lthread_mutex_lock(), except
 * that it does not block the calling thread if the mutex is already locked
 * by another thread.
 *
 * lthread_mutex_unlock() unlocks the specified mutex. The mutex is assumed
 * to be locked and owned by the calling thread.
 *
 * lthread_mutex_destroy() destroys a mutex object, freeing its resources.
 * The mutex must be unlocked with nothing blocked on it before calling
 * lthread_mutex_destroy().
 *
 * @param name
 *  Optional pointer to a string describing the mutex
 * @param mutex
 *  Pointer to a pointer to the mutex to be initialized
 * @param attr
 *  Pointer to attribute - unused, reserved for future use
 *
 * @return
 *  0 success
 *  EINVAL mutex was not a valid pointer
 *  EAGAIN insufficient resources
 */
int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		   const struct lthread_mutexattr *attr);

/**
 * Destroy a mutex
 *
 * This function destroys the specified mutex, freeing its resources.
 * The mutex must be unlocked before calling lthread_mutex_destroy().
 *
 * @see lthread_mutex_init()
 *
 * @param mutex
 *  Pointer to the mutex to be destroyed
 *
 * @return
 *  0 success
 *  EINVAL mutex was not an initialized mutex
 *  EBUSY mutex was still in use
 */
int lthread_mutex_destroy(struct lthread_mutex *mutex);

/**
 * Lock a mutex
 *
 * This function attempts to lock a mutex.
 * If a thread calls lthread_mutex_lock() on the mutex, then if the mutex
 * is currently unlocked, it becomes locked and owned by the calling
 * thread, and lthread_mutex_lock() returns immediately. If the mutex is
 * already locked by another thread, lthread_mutex_lock() suspends the calling
 * thread until the mutex is unlocked.
 *
 * @see lthread_mutex_init()
 *
 * @param mutex
 *  Pointer to the mutex to be locked
 *
 * @return
 *  0 success
 *  EINVAL mutex was not an initialized mutex
 *  EDEADLOCK the mutex was already owned by the calling thread
 */
int lthread_mutex_lock(struct lthread_mutex *mutex);

/**
 * Try to lock a mutex
 *
 * This function attempts to lock a mutex.
 * lthread_mutex_trylock() behaves identically to lthread_mutex_lock(), except
 * that it does not block the calling thread if the mutex is already locked
 * by another thread.
 *
 * @see lthread_mutex_init()
 *
 * @param mutex
 *  Pointer to the mutex to be locked
 *
 * @return
 *  0 success
 *  EINVAL mutex was not an initialized mutex
 *  EBUSY the mutex was already locked by another thread
 */
int lthread_mutex_trylock(struct lthread_mutex *mutex);

/**
 * Unlock a mutex
 *
 * This function attempts to unlock the specified mutex. The mutex is assumed
 * to be locked and owned by the calling thread.
 *
 * The oldest of any threads blocked on the mutex is made ready and may
 * compete with any other running thread to gain the mutex; if it fails it
 * will be blocked again.
 *
 * @param mutex
 *  Pointer to the mutex to be unlocked
 *
 * @return
 *  0 mutex was unlocked
 *  EINVAL mutex was not an initialized mutex
 *  EPERM the mutex was not owned by the calling thread
 */
int lthread_mutex_unlock(struct lthread_mutex *mutex);

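/**
 * Illustrative sketch (not part of the original header): protecting a counter
 * shared between lthreads that may run on different cores. The names
 * shared_lock, shared_count and the two functions are hypothetical.
 *
 * @code
 *	static struct lthread_mutex *shared_lock;
 *	static uint64_t shared_count;
 *
 *	static void setup_lock(void)
 *	{
 *		lthread_mutex_init("shared_lock", &shared_lock, NULL);
 *	}
 *
 *	static void bump(void *arg)
 *	{
 *		lthread_mutex_lock(shared_lock);	// may suspend this lthread
 *		shared_count++;
 *		lthread_mutex_unlock(shared_lock);
 *	}
 * @endcode
 */
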
/**
 * Initialize a condition variable
 *
 * This function initializes a condition variable.
 *
 * Condition variables can be used to communicate changes in the state of data
 * shared between threads.
 *
 * @see lthread_cond_wait()
 *
 * @param name
 *  Pointer to an optional string describing the condition variable
 * @param c
 *  Pointer to a pointer to the condition variable to be initialized
 * @param attr
 *  Pointer to an optional attribute reserved for future use, currently ignored
 *
 * @return
 *  0 success
 *  EINVAL cond was not a valid pointer
 *  EAGAIN insufficient resources
 */
int
lthread_cond_init(char *name, struct lthread_cond **c,
		  const struct lthread_condattr *attr);

/**
 * Destroy a condition variable
 *
 * This function destroys a condition variable that was created with
 * lthread_cond_init() and releases its resources.
 *
 * @param cond
 *  Pointer to the condition variable to be destroyed
 *
 * @return
 *  0 Success
 *  EBUSY condition variable was still in use
 *  EINVAL cond was not an initialised condition variable
 */
int lthread_cond_destroy(struct lthread_cond *cond);

/**
 * Wait on a condition variable
 *
 * The function blocks the current thread waiting on the condition variable
 * specified by c. The waiting thread unblocks only after another thread
 * calls lthread_cond_signal(), or lthread_cond_broadcast(), specifying the
 * same condition variable.
 *
 * @param c
 *  Pointer to the condition variable to be waited on
 *
 * @param reserved
 *  reserved for future use
 *
 * @return
 *  0 The condition was signalled (Success)
 *  EINVAL c was not an initialised condition variable
 */
int lthread_cond_wait(struct lthread_cond *c, uint64_t reserved);

/**
 * Signal a condition variable
 *
 * The function unblocks one thread waiting on the condition variable c.
 * If no threads are waiting on c, the lthread_cond_signal() function
 * has no effect.
 *
 * @param c
 *  Pointer to the condition variable to be signalled
 *
 * @return
 *  0 The condition was signalled (Success)
 *  EINVAL c was not an initialised condition variable
 */
int lthread_cond_signal(struct lthread_cond *c);

/**
 * Broadcast a condition variable
 *
 * The function unblocks all threads waiting on the condition variable c.
 * If no threads are waiting on c, the lthread_cond_broadcast()
 * function has no effect.
 *
 * @param c
 *  Pointer to the condition variable to be signalled
 *
 * @return
 *  0 The condition was signalled (Success)
 *  EINVAL c was not an initialised condition variable
 */
int lthread_cond_broadcast(struct lthread_cond *c);

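/**
 * Illustrative sketch (not part of the original header): a waiter suspends on
 * a condition variable until a producer signals that data is ready. The names
 * ready_cond, ready, waiter and producer are hypothetical. Note that, unlike
 * the pthread equivalent, lthread_cond_wait() takes no mutex argument, so the
 * flag is re-checked in a loop after each wakeup.
 *
 * @code
 *	static struct lthread_cond *ready_cond;	// set up elsewhere with
 *	static int ready;			// lthread_cond_init()
 *
 *	static void waiter(void *arg)
 *	{
 *		while (!ready)
 *			lthread_cond_wait(ready_cond, 0);	// suspends here
 *		// ... consume the data ...
 *	}
 *
 *	static void producer(void *arg)
 *	{
 *		// ... produce the data ...
 *		ready = 1;
 *		lthread_cond_signal(ready_cond);	// wake one waiter
 *	}
 * @endcode
 */
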
#endif				/* LTHREAD_H */