include/asm-um/cache.h
#ifndef __UM_CACHE_H
#define __UM_CACHE_H


#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
# define L1_CACHE_SHIFT		(CONFIG_X86_L1_CACHE_SHIFT)
#elif defined(CONFIG_UML_X86) /* 64-bit */
# define L1_CACHE_SHIFT	6	/* Should be 7 on Intel */
#else
/* XXX: this was taken from x86, now it's completely random. Luckily only
 * affects SMP padding. */
# define L1_CACHE_SHIFT	5
#endif

#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#endif
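
For illustration only (not part of this header): a minimal user-space sketch of the kind of SMP padding the XXX comment refers to. The constant below is hard-coded to the UML fallback value (shift 5, i.e. 32 bytes) rather than taken from any kernel config; aligning each per-CPU slot to L1_CACHE_BYTES keeps independently written counters on separate cache lines, which is what limits the impact of a "random" shift to padding rather than correctness.

/* build: gcc -O2 pad_demo.c -o pad_demo */
#include <stdio.h>

#define L1_CACHE_SHIFT	5			/* UML fallback used above */
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)	/* 32 bytes */

/* One counter per slot; alignment pads each slot out to a full cache line. */
struct counter {
	unsigned long value;
} __attribute__((aligned(L1_CACHE_BYTES)));

static struct counter per_cpu_counter[4];	/* hypothetical per-CPU array */

int main(void)
{
	/* Each element occupies (at least) one 32-byte line of its own. */
	printf("sizeof(struct counter) = %zu\n", sizeof(struct counter));
	printf("stride between slots   = %zu\n",
	       (size_t)((char *)&per_cpu_counter[1] -
			(char *)&per_cpu_counter[0]));
	return 0;
}

If L1_CACHE_SHIFT is smaller than the real line size, two slots can share a line and write traffic from different CPUs bounces the line between caches (false sharing); if it is larger, the only cost is wasted padding bytes.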