*/
/* Global semaphore serializing access to cpuset state. */
static DECLARE_MUTEX(cpuset_sem);
/*
 * Task currently holding cpuset_sem, or NULL if unheld.  Written only
 * by the holder (set after down(), cleared before up()), which lets
 * cpuset_down()/cpuset_up() support re-entrant acquisition.
 */
static struct task_struct *cpuset_sem_owner;
/* Nesting depth of cpuset_down() calls by cpuset_sem_owner. */
static int cpuset_sem_depth;
/*
* The global cpuset semaphore cpuset_sem can be needed by the
static inline void cpuset_down(struct semaphore *psem)
{
- if (current->cpuset_sem_nest_depth == 0)
+ if (cpuset_sem_owner != current) {
down(psem);
- current->cpuset_sem_nest_depth++;
+ cpuset_sem_owner = current;
+ }
+ cpuset_sem_depth++;
}
static inline void cpuset_up(struct semaphore *psem)
{
- current->cpuset_sem_nest_depth--;
- if (current->cpuset_sem_nest_depth == 0)
+ if (--cpuset_sem_depth == 0) {
+ cpuset_sem_owner = NULL;
up(psem);
+ }
}
/*
char *page;
ssize_t retval = 0;
char *s;
- char *start;
- size_t n;
if (!(page = (char *)__get_free_page(GFP_KERNEL)))
return -ENOMEM;
*s++ = '\n';
*s = '\0';
- /* Do nothing if *ppos is at the eof or beyond the eof. */
- if (s - page <= *ppos)
- return 0;
-
- start = page + *ppos;
- n = s - start;
- retval = n - copy_to_user(buf, start, min(n, nbytes));
- *ppos += retval;
+ retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
out:
free_page((unsigned long)page);
return retval;
* GFP_USER - only nodes in current tasks mems allowed ok.
**/
-int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask)
+int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
int node; /* node that zone z is on */
const struct cpuset *cs; /* current cpuset ancestors */