3 * Copyright © 2012 Serge Hallyn <serge.hallyn@ubuntu.com>.
4 * Copyright © 2012 Canonical Ltd.
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31 #include <lxc/lxccontainer.h>
36 #ifdef MUTEX_DEBUGGING
40 #define MAX_STACKDEPTH 25
42 #define OFLAG (O_CREAT | O_RDWR)
45 #define SEMVALUE_LOCKED 0
/* Register this file's logging category: "lxc_lock", child of "lxc". */
lxc_log_define(lxc_lock, lxc);
49 #ifdef MUTEX_DEBUGGING
/* Process-wide mutex serialising thread-sensitive operations (see
 * process_lock()/process_unlock()).  Debug builds use an error-checking
 * mutex so that relocking by the same thread fails loudly instead of
 * deadlocking. */
static pthread_mutex_t thread_mutex = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
/* Dump the calling thread's stack (up to MAX_STACKDEPTH frames) to
 * stderr using backtrace(3)/backtrace_symbols(3).  Debug builds only.
 * NOTE(review): the declarations of size, strings and i, the freeing of
 * the symbol array and the closing brace are not visible in this
 * chunk — confirm against the full file. */
static inline void dump_stacktrace(void)
	void *array[MAX_STACKDEPTH];

	size = backtrace(array, MAX_STACKDEPTH);
	strings = backtrace_symbols(array, size);

	// Using fprintf here as our logging module is not thread safe
	fprintf(stderr, "\tObtained %zd stack frames.\n", size);

	for (i = 0; i < size; i++)
		fprintf(stderr, "\t\t%s\n", strings[i]);
/* Release builds: a plain (non-error-checking) mutex suffices. */
static pthread_mutex_t thread_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Release builds: stack dumping is compiled out.  Keep a no-op stub so
 * callers need no #ifdef guards. */
static inline void dump_stacktrace(void)
{
}
/* Lock *l, treating failure as fatal.  pthread_mutex_lock() returns the
 * error number directly (it does not set errno), hence strerror(ret).
 * fprintf rather than ERROR(): the logging module is not thread safe.
 * NOTE(review): the declaration of ret and the error-path tail
 * (stack dump / abort and closing braces) are not visible in this chunk. */
static void lock_mutex(pthread_mutex_t *l)
	if ((ret = pthread_mutex_lock(l)) != 0) {
		fprintf(stderr, "pthread_mutex_lock returned:%d %s\n", ret, strerror(ret));
/* Unlock *l, treating failure as fatal.  As with lock_mutex(), the
 * pthread call returns the error number directly, and fprintf is used
 * because the logging module is not thread safe.
 * NOTE(review): the declaration of ret and the error-path tail are not
 * visible in this chunk. */
static void unlock_mutex(pthread_mutex_t *l)
	if ((ret = pthread_mutex_unlock(l)) != 0) {
		fprintf(stderr, "pthread_mutex_unlock returned:%d %s\n", ret, strerror(ret));
/* Build the per-container lock-file path for container n at lxcpath p,
 * creating the containing directory on the way:
 *   "/run" + "/lxc/lock/$lxcpath/.$lxcname" + '\0'           if root
 *   $XDG_RUNTIME_DIR + "/lxc/lock/$lxcpath/.$lxcname" + '\0' if non-root
 * Returns a malloc'd string the caller must free, NULL on error.
 * NOTE(review): local declarations (len, rundir, dest, ret), the
 * error-branch bodies and the returns are not visible in this chunk. */
static char *lxclock_name(const char *p, const char *n)
	/* length of "/lxc/lock/" + $lxcpath + "/" + "." + $lxcname + '\0' */
	len = strlen("/lxc/lock/") + strlen(n) + strlen(p) + 3;
	rundir = get_rundir();
	len += strlen(rundir);

	if ((dest = malloc(len)) == NULL) {

	/* first build "$rundir/lxc/lock/$lxcpath" so it can be mkdir_p'd */
	ret = snprintf(dest, len, "%s/lxc/lock/%s", rundir, p);
	if (ret < 0 || ret >= len) {

	ret = mkdir_p(dest, 0755);

	/* then the full file name — note the leading '.' on the container name */
	ret = snprintf(dest, len, "%s/lxc/lock/%s/.%s", rundir, p, n);
	if (ret < 0 || ret >= len) {
/* Allocate and initialise an unnamed, process-private (pshared == 0)
 * semaphore with initial value 1, i.e. created unlocked.
 * NOTE(review): the NULL/error checks and the return statement are not
 * visible in this chunk. */
static sem_t *lxc_new_unnamed_sem(void)
	s = malloc(sizeof(*s));
	ret = sem_init(s, 0, 1);
/* Allocate a new lxc_lock.  Two flavours are initialised below —
 * presumably selected on whether name is NULL (anonymous in-process
 * semaphore) or not (file-backed fcntl lock named by
 * lxclock_name(lxcpath, name)); the branch condition itself is outside
 * this view — confirm against the full file.  Returns NULL on error.
 * NOTE(review): error handling, the fd initialisation and the return
 * are not visible in this chunk. */
struct lxc_lock *lxc_newlock(const char *lxcpath, const char *name)
	l = malloc(sizeof(*l));

	/* anonymous-semaphore flavour */
	l->type = LXC_LOCK_ANON_SEM;
	l->u.sem = lxc_new_unnamed_sem();

	/* file-backed flock flavour: the fd is opened lazily by lxclock() */
	l->type = LXC_LOCK_FLOCK;
	l->u.f.fname = lxclock_name(lxcpath, name);
/* Take lock l.  timeout is in seconds; 0 means block indefinitely.
 * errno is saved on entry (and presumably restored on the success path,
 * outside this view) so locking does not clobber the caller's errno.
 *
 * LXC_LOCK_ANON_SEM: plain sem_wait(), or sem_timedwait() against an
 * absolute CLOCK_REALTIME deadline when a timeout was given.
 * LXC_LOCK_FLOCK: timeout is rejected; the lock file is open()ed lazily
 * on first use, then a blocking write lock is taken via fcntl(F_SETLKW).
 *
 * NOTE(review): the switch statement itself, the declarations of ts and
 * lk, the open() mode argument, branch tails and the returns are not
 * visible in this chunk. */
int lxclock(struct lxc_lock *l, int timeout)
	int ret = -1, saved_errno = errno;

	case LXC_LOCK_ANON_SEM:
		/* no timeout: block until the semaphore is available */
		ret = sem_wait(l->u.sem);

		/* timeout given: deadline = now + timeout seconds (realtime clock,
		 * as required by sem_timedwait) */
		if (clock_gettime(CLOCK_REALTIME, &ts) == -1) {
		ts.tv_sec += timeout;
		ret = sem_timedwait(l->u.sem, &ts);

		ERROR("Error: timeout not supported with flock");

		ERROR("Error: filename not set for flock");

		/* open the lock file lazily, the first time this lock is taken */
		if (l->u.f.fd == -1) {
			l->u.f.fd = open(l->u.f.fname, O_RDWR|O_CREAT,
			if (l->u.f.fd == -1) {
				ERROR("Error opening %s", l->u.f.fname);

		lk.l_whence = SEEK_SET;

		ret = fcntl(l->u.f.fd, F_SETLKW, &lk);
/* Release lock l.  errno is saved on entry (and presumably restored on
 * exit, outside this view) so unlocking does not clobber the caller's
 * errno.
 * LXC_LOCK_ANON_SEM: sem_post().
 * LXC_LOCK_FLOCK: drop the fcntl() lock via F_SETLK if the lock file is
 * open (the lk.l_type = F_UNLCK assignment is presumably on a line
 * outside this view — confirm against the full file).
 * NOTE(review): the switch statement, the declaration of lk, branch
 * tails and the returns are not visible in this chunk. */
int lxcunlock(struct lxc_lock *l)
	int ret = 0, saved_errno = errno;

	case LXC_LOCK_ANON_SEM:
		ret = sem_post(l->u.sem);

		if (l->u.f.fd != -1) {
			lk.l_whence = SEEK_SET;

			ret = fcntl(l->u.f.fd, F_SETLK, &lk);
285 * lxc_putlock() is only called when a container_new() fails,
286 * or during container_put(), which is already guaranteed to
287 * only be done by one task.
288 * So the only exclusion we need to provide here is for regular
289 * thread safety (i.e. file descriptor table changes).
/* Destroy lock l and free its resources.  Per the comment above, the
 * caller guarantees only one task ever does this, so the only exclusion
 * needed is ordinary thread safety around fd-table changes.
 * NOTE(review): the switch statement, the close()/free() of the flock
 * members and the final free of l are not visible in this chunk. */
void lxc_putlock(struct lxc_lock *l)
	case LXC_LOCK_ANON_SEM:
		sem_destroy(l->u.sem);

		if (l->u.f.fd != -1) {
315 void process_lock(void)
317 lock_mutex(&thread_mutex
);
320 void process_unlock(void)
322 unlock_mutex(&thread_mutex
);
325 /* One thread can do fork() while another one is holding a mutex.
326 * There is only one thread in child just after the fork(), so no one will ever release that mutex.
327 * We setup a "child" fork handler to unlock the mutex just after the fork().
328 * For several mutex types, unlocking an unlocked mutex can lead to undefined behavior.
329 * One way to deal with it is to setup "prepare" fork handler
330 * to lock the mutex before fork() and both "parent" and "child" fork handlers
331 * to unlock the mutex.
332 * This forbids doing fork() while explicitly holding the lock.
334 #ifdef HAVE_PTHREAD_ATFORK
335 __attribute__((constructor
))
336 static void process_lock_setup_atfork(void)
338 pthread_atfork(process_lock
, process_unlock
, process_unlock
);
342 int container_mem_lock(struct lxc_container
*c
)
344 return lxclock(c
->privlock
, 0);
347 void container_mem_unlock(struct lxc_container
*c
)
349 lxcunlock(c
->privlock
);
/* Take both of the container's locks: privlock (excludes other threads
 * of this process) first, then slock.  If slock cannot be taken,
 * privlock is dropped again so no partial lock state leaks to the
 * caller.
 * NOTE(review): the declaration of ret and the return statements are
 * not visible in this chunk. */
int container_disk_lock(struct lxc_container *c)
	if ((ret = lxclock(c->privlock, 0)))
	if ((ret = lxclock(c->slock, 0))) {
		lxcunlock(c->privlock);
/* Release the locks taken by container_disk_lock().
 * NOTE(review): only the privlock release is visible here; the matching
 * slock release (expected first, i.e. reverse order of acquisition)
 * appears to fall outside this view — confirm against the full file. */
void container_disk_unlock(struct lxc_container *c)
	lxcunlock(c->privlock);