extern int get_dumpable(struct mm_struct *mm);
/* mm flags */
+/* dumpable bits */
#define MMF_DUMPABLE 0 /* core dump is permitted */
#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */
+#define MMF_DUMPABLE_BITS 2
+
+/* coredump filter bits */
+#define MMF_DUMP_ANON_PRIVATE 2
+#define MMF_DUMP_ANON_SHARED 3
+#define MMF_DUMP_MAPPED_PRIVATE 4
+#define MMF_DUMP_MAPPED_SHARED 5
+#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
+#define MMF_DUMP_FILTER_BITS 4
+#define MMF_DUMP_FILTER_MASK \
+ (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
+#define MMF_DUMP_FILTER_DEFAULT \
+ ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED))
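
/*
 * Illustrative sketch (editorial, not part of this patch): the coredump
 * filter bits above live in the mm's flags word, so a hypothetical helper
 * that tests one of them only needs the macros defined here.
 */
static inline int mmf_dump_test(unsigned long mm_flags, int bit)
{
	/* bit is one of MMF_DUMP_ANON_PRIVATE .. MMF_DUMP_MAPPED_SHARED */
	return (mm_flags >> bit) & 1;
}
/*
 * With MMF_DUMP_FILTER_DEFAULT == (1 << 2) | (1 << 3) == 0xc, only the two
 * anonymous-mapping bits are set:
 *   mmf_dump_test(MMF_DUMP_FILTER_DEFAULT, MMF_DUMP_ANON_PRIVATE)  == 1
 *   mmf_dump_test(MMF_DUMP_FILTER_DEFAULT, MMF_DUMP_MAPPED_SHARED) == 0
 */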
struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
#define SCHED_LOAD_SHIFT 10
#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
-#define SCHED_LOAD_SCALE_FUZZ (SCHED_LOAD_SCALE >> 5)
+#define SCHED_LOAD_SCALE_FUZZ (SCHED_LOAD_SCALE >> 1)
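/*
 * Worked values (editorial note): with SCHED_LOAD_SCALE = 1 << 10 = 1024,
 * the fuzz grows from 1024 >> 5 = 32 to 1024 >> 1 = 512, i.e. half the
 * load weight of a single nice 0 task.
 */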
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
unsigned long max_interval; /* Maximum balance interval ms */
unsigned int busy_factor; /* less balancing by factor if busy */
unsigned int imbalance_pct; /* No balance until over watermark */
- unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
unsigned int busy_idx;
unsigned int idle_idx;
#endif /* CONFIG_SMP */
+/*
+ * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
+ * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
+ * task of nice 0 or enough lower priority tasks to bring up the
+ * weighted_cpuload to that level.
+ */
+static inline int above_background_load(void)
+{
+ unsigned long cpu;
+
+ for_each_online_cpu(cpu) {
+ if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
+ return 1;
+ }
+ return 0;
+}
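/*
 * Worked example (editorial note): a single nice 0 task contributes a load
 * weight of SCHED_LOAD_SCALE = 1024, so a cpu running just that task meets
 * the >= SCHED_LOAD_SCALE test and above_background_load() returns 1; an
 * idle cpu, or one whose lower-priority tasks together weigh less than
 * 1024, does not.
 */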
struct io_context; /* See blkdev.h */
struct cpuset;
struct task_struct * (*pick_next_task) (struct rq *rq, u64 now);
void (*put_prev_task) (struct rq *rq, struct task_struct *p, u64 now);
- int (*load_balance) (struct rq *this_rq, int this_cpu,
+ unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
struct rq *busiest,
unsigned long max_nr_move, unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
- int *all_pinned, unsigned long *total_load_moved);
+ int *all_pinned, int *this_best_prio);
void (*set_curr_task) (struct rq *rq);
void (*task_tick) (struct rq *rq, struct task_struct *p);
- void (*task_new) (struct rq *rq, struct task_struct *p);
+ void (*task_new) (struct rq *rq, struct task_struct *p, u64 now);
};
struct load_weight {
struct rb_node run_node;
unsigned int on_rq;
+ u64 exec_start;
+ u64 sum_exec_runtime;
u64 wait_start_fair;
+ u64 sleep_start_fair;
+
+#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
- u64 exec_start;
+ u64 wait_max;
+ s64 sum_wait_runtime;
+
u64 sleep_start;
- u64 sleep_start_fair;
- u64 block_start;
u64 sleep_max;
+ s64 sum_sleep_runtime;
+
+ u64 block_start;
u64 block_max;
u64 exec_max;
- u64 wait_max;
- u64 last_ran;
- u64 sum_exec_runtime;
- s64 sum_wait_runtime;
- s64 sum_sleep_runtime;
unsigned long wait_runtime_overruns;
unsigned long wait_runtime_underruns;
+#endif
+
#ifdef CONFIG_FAIR_GROUP_SCHED
struct sched_entity *parent;
/* rq on which this entity is (to be) queued: */
struct sched_class *sched_class;
struct sched_entity se;
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+ /* list of struct preempt_notifier: */
+ struct hlist_head preempt_notifiers;
+#endif
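
/*
 * Illustrative sketch (editorial, not part of this patch): how a client
 * might hook into the notifier list above, assuming the preempt_ops /
 * preempt_notifier_init() / preempt_notifier_register() interface that
 * accompanies this field.  All "example_" names are hypothetical.
 */
static void example_sched_in(struct preempt_notifier *pn, int cpu)
{
	/* current is about to run again on @cpu */
}

static void example_sched_out(struct preempt_notifier *pn,
			      struct task_struct *next)
{
	/* current is being descheduled in favour of @next */
}

static struct preempt_ops example_preempt_ops = {
	.sched_in	= example_sched_in,
	.sched_out	= example_sched_out,
};

static struct preempt_notifier example_notifier;

static void example_register(void)
{
	/* attach the notifier to the current task */
	preempt_notifier_init(&example_notifier, &example_preempt_ops);
	preempt_notifier_register(&example_notifier);
}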
+
unsigned short ioprio;
#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
#endif
extern unsigned long long sched_clock(void);
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+extern unsigned long long cpu_clock(int cpu);
+
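/*
 * Illustrative sketch (editorial, not part of this patch): timing a short
 * code section with cpu_clock().  The value is in nanoseconds; because the
 * clock is per-cpu and only loosely synchronized, both readings should be
 * taken on the same cpu.
 */
static void example_time_section(void)
{
	int cpu = get_cpu();			/* pin to one cpu */
	unsigned long long t0 = cpu_clock(cpu);

	/* ... code being measured ... */

	printk(KERN_DEBUG "section took %llu ns\n", cpu_clock(cpu) - t0);
	put_cpu();
}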
extern unsigned long long
task_sched_runtime(struct task_struct *task);