/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <sched.h>
#include <strings.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_atomic_64.h>

#include "lthread_tls.h"
#include "lthread_queue.h"
#include "lthread_objcache.h"
#include "lthread_sched.h"
/* Pool of free TLS keys, handed out by lthread_key_create(). */
static struct rte_ring *key_pool;
/* One-shot guard so only a single lcore builds the pool
 * (see the rte_atomic64_cmpset in _lthread_key_pool_init). */
static uint64_t key_pool_init;

/* needed to cause section start and end to be defined */
RTE_DEFINE_PER_LTHREAD(void *, dummy);

/* Backing table for all keys; entry 0 is never placed in the pool. */
static struct lthread_key key_table[LTHREAD_MAX_KEYS];

/* Run lthread_tls_ctor() automatically before main(). */
void lthread_tls_ctor(void) __attribute__((constructor));
68 void lthread_tls_ctor(void)
75 * Initialize a pool of keys
76 * These are unique tokens that can be obtained by threads
77 * calling lthread_key_create()
79 void _lthread_key_pool_init(void)
81 static struct rte_ring
*pool
;
82 struct lthread_key
*new_key
;
83 char name
[MAX_LTHREAD_NAME_SIZE
];
85 bzero(key_table
, sizeof(key_table
));
87 /* only one lcore should do this */
88 if (rte_atomic64_cmpset(&key_pool_init
, 0, 1)) {
91 MAX_LTHREAD_NAME_SIZE
,
92 "lthread_key_pool_%d",
95 pool
= rte_ring_create(name
,
96 LTHREAD_MAX_KEYS
, 0, 0);
101 for (i
= 1; i
< LTHREAD_MAX_KEYS
; i
++) {
102 new_key
= &key_table
[i
];
103 rte_ring_mp_enqueue((struct rte_ring
*)pool
,
108 /* other lcores wait here till done */
109 while (key_pool
== NULL
) {
110 rte_compiler_barrier();
117 * this means getting a key from the the pool
119 int lthread_key_create(unsigned int *key
, tls_destructor_func destructor
)
122 return POSIX_ERRNO(EINVAL
);
124 struct lthread_key
*new_key
;
126 if (rte_ring_mc_dequeue((struct rte_ring
*)key_pool
, (void **)&new_key
)
128 new_key
->destructor
= destructor
;
129 *key
= (new_key
- key_table
);
133 return POSIX_ERRNO(EAGAIN
);
140 int lthread_key_delete(unsigned int k
)
142 struct lthread_key
*key
;
144 key
= (struct lthread_key
*) &key_table
[k
];
146 if (k
> LTHREAD_MAX_KEYS
)
147 return POSIX_ERRNO(EINVAL
);
149 key
->destructor
= NULL
;
150 rte_ring_mp_enqueue((struct rte_ring
*)key_pool
,
158 * Break association for all keys in use by this thread
159 * invoke the destructor if available.
160 * Since a destructor can create keys we could enter an infinite loop
161 * therefore we give up after LTHREAD_DESTRUCTOR_ITERATIONS
162 * the behavior is modelled on pthread
164 void _lthread_tls_destroy(struct lthread
*lt
)
170 for (i
= 0; i
< LTHREAD_DESTRUCTOR_ITERATIONS
; i
++) {
172 for (k
= 1; k
< LTHREAD_MAX_KEYS
; k
++) {
174 /* no keys in use ? */
175 nb_keys
= lt
->tls
->nb_keys_inuse
;
179 /* this key not in use ? */
180 if (lt
->tls
->data
[k
] == NULL
)
183 /* remove this key */
184 data
= lt
->tls
->data
[k
];
185 lt
->tls
->data
[k
] = NULL
;
186 lt
->tls
->nb_keys_inuse
= nb_keys
-1;
188 /* invoke destructor */
189 if (key_table
[k
].destructor
!= NULL
)
190 key_table
[k
].destructor(data
);
196 * Return the pointer associated with a key
197 * If the key is no longer valid return NULL
200 *lthread_getspecific(unsigned int k
)
203 if (k
> LTHREAD_MAX_KEYS
)
206 return THIS_LTHREAD
->tls
->data
[k
];
210 * Set a value against a key
211 * If the key is no longer valid return an error
214 int lthread_setspecific(unsigned int k
, const void *data
)
216 if (k
> LTHREAD_MAX_KEYS
)
217 return POSIX_ERRNO(EINVAL
);
219 int n
= THIS_LTHREAD
->tls
->nb_keys_inuse
;
221 /* discard const qualifier */
222 char *p
= (char *) (uintptr_t) data
;
226 if (THIS_LTHREAD
->tls
->data
[k
] == NULL
)
227 THIS_LTHREAD
->tls
->nb_keys_inuse
= n
+1;
230 THIS_LTHREAD
->tls
->data
[k
] = (void *) p
;
235 * Allocate data for TLS cache
237 void _lthread_tls_alloc(struct lthread
*lt
)
239 struct lthread_tls
*tls
;
241 tls
= _lthread_objcache_alloc((THIS_SCHED
)->tls_cache
);
243 RTE_ASSERT(tls
!= NULL
);
245 tls
->root_sched
= (THIS_SCHED
);
248 /* allocate data for TLS varaiables using RTE_PER_LTHREAD macros */
249 if (sizeof(void *) < (uint64_t)RTE_PER_LTHREAD_SECTION_SIZE
) {
250 lt
->per_lthread_data
=
251 _lthread_objcache_alloc((THIS_SCHED
)->per_lthread_cache
);