MIPS: Netlogic: Add cpu to node mapping for XLP9XX
author Jayachandran C <jchandra@broadcom.com>
Sat, 21 Dec 2013 11:22:26 +0000 (16:52 +0530)
committer Ralf Baechle <ralf@linux-mips.org>
Fri, 24 Jan 2014 21:39:49 +0000 (22:39 +0100)
XLP9XX has 20 cores per node, as opposed to 8 on the earlier XLP8XX (the hardware CPU id space reserves 32 core slots per node on XLP9XX, which is the value the setup code below uses). Update the code that calculates the node id from the cpu id to handle this.
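A minimal user-space sketch of the arithmetic behind the new nlm_cpuid_to_node() mapping; the names and example values are illustrative only, with cores_per_node standing in for the xlp_cores_per_node variable this patch introduces (8 on XLP8XX, 32 core slots on XLP9XX):

#include <stdio.h>

#define NLM_THREADS_PER_CORE 4

/* Hypothetical stand-in for nlm_cpuid_to_node(): cores_per_node plays the
 * role of xlp_cores_per_node (8 on XLP8XX, 32 core slots on XLP9XX). */
static int cpuid_to_node(int hwcpuid, int cores_per_node)
{
        return hwcpuid / (cores_per_node * NLM_THREADS_PER_CORE);
}

int main(void)
{
        /* hw cpu id 40: node 1 with 32 cpus per node, node 0 with 128 */
        printf("XLP8XX: cpu 40 -> node %d\n", cpuid_to_node(40, 8));
        printf("XLP9XX: cpu 40 -> node %d\n", cpuid_to_node(40, 32));
        return 0;
}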

Signed-off-by: Jayachandran C <jchandra@broadcom.com>
Signed-off-by: John Crispin <blogic@openwrt.org>
Patchwork: http://patchwork.linux-mips.org/patch/6283/

arch/mips/include/asm/mach-netlogic/multi-node.h
arch/mips/include/asm/netlogic/mips-extns.h
arch/mips/netlogic/common/irq.c
arch/mips/netlogic/common/smp.c
arch/mips/netlogic/xlp/setup.c
arch/mips/netlogic/xlp/wakeup.c
arch/mips/netlogic/xlr/wakeup.c
arch/mips/pci/msi-xlp.c

index df9869d13afda8b62a0d4621f56812ca8307fae8..9ed8dacdc37c5aa3bae8bbfc4e31cf5c6b51bed6 100644 (file)
 #endif
 #endif
 
-#define NLM_CORES_PER_NODE     8
 #define NLM_THREADS_PER_CORE   4
-#define NLM_CPUS_PER_NODE      (NLM_CORES_PER_NODE * NLM_THREADS_PER_CORE)
+#ifdef CONFIG_CPU_XLR
+#define nlm_cores_per_node()   8
+#else
+extern unsigned int xlp_cores_per_node;
+#define nlm_cores_per_node()   xlp_cores_per_node
+#endif
+
+#define nlm_threads_per_node() (nlm_cores_per_node() * NLM_THREADS_PER_CORE)
+#define nlm_cpuid_to_node(c)   ((c) / nlm_threads_per_node())
 
 struct nlm_soc_info {
        unsigned long   coremask;       /* cores enabled on the soc */
index f299d31d7c1a3faf7eeffd22f7409cce90954cd6..de9aada6f4c1cbd6f0bee1997f7e1744694b04f2 100644 (file)
@@ -146,7 +146,12 @@ static inline int hard_smp_processor_id(void)
 
 static inline int nlm_nodeid(void)
 {
-       return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
+       uint32_t prid = read_c0_prid();
+
+       if ((prid & 0xff00) == PRID_IMP_NETLOGIC_XLP9XX)
+               return (__read_32bit_c0_register($15, 1) >> 7) & 0x7;
+       else
+               return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
 }
 
 static inline unsigned int nlm_core_id(void)
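For reference, a standalone sketch of the bit-field decode performed by nlm_nodeid() above; node_from_cp0_15_1() is a hypothetical helper (not kernel code) that mirrors how the coprocessor 0 register $15 select 1 value is interpreted on the two chip families:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the decode in nlm_nodeid() above: on
 * XLP9XX the node id sits in bits 9:7 of the CP0 $15 sel 1 value (up to
 * 8 nodes), on earlier XLP parts in bits 6:5 (up to 4 nodes). */
static int node_from_cp0_15_1(uint32_t raw, int is_xlp9xx)
{
        if (is_xlp9xx)
                return (raw >> 7) & 0x7;
        return (raw >> 5) & 0x3;
}

int main(void)
{
        /* example raw values, chosen only to exercise both layouts */
        printf("XLP8XX: raw 0x65  -> node %d\n", node_from_cp0_15_1(0x65, 0));  /* 3 */
        printf("XLP9XX: raw 0x180 -> node %d\n", node_from_cp0_15_1(0x180, 1)); /* 3 */
        return 0;
}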
index 3800bf6551ab43c51d24e2ebf3829cbd4cde0885..8092bb3206186bfdc3e4acbc39a7a4cde72177a3 100644 (file)
@@ -223,7 +223,7 @@ static void nlm_init_node_irqs(int node)
                        continue;
 
                nlm_pic_init_irt(nodep->picbase, irt, i,
-                                       node * NLM_CPUS_PER_NODE, 0);
+                               node * nlm_threads_per_node(), 0);
                nlm_setup_pic_irq(node, i, i, irt);
        }
 }
@@ -232,8 +232,8 @@ void nlm_smp_irq_init(int hwcpuid)
 {
        int node, cpu;
 
-       node = hwcpuid / NLM_CPUS_PER_NODE;
-       cpu  = hwcpuid % NLM_CPUS_PER_NODE;
+       node = nlm_cpuid_to_node(hwcpuid);
+       cpu  = hwcpuid % nlm_threads_per_node();
 
        if (cpu == 0 && node != 0)
                nlm_init_node_irqs(node);
index c0eded01fde96ef23676eabc28e72ceb286467ad..6baae15cc7b182e8840e71d58b7a8e3669385ca4 100644 (file)
@@ -63,7 +63,7 @@ void nlm_send_ipi_single(int logical_cpu, unsigned int action)
        uint64_t picbase;
 
        cpu = cpu_logical_map(logical_cpu);
-       node = cpu / NLM_CPUS_PER_NODE;
+       node = nlm_cpuid_to_node(cpu);
        picbase = nlm_get_node(node)->picbase;
 
        if (action & SMP_CALL_FUNCTION)
@@ -152,7 +152,7 @@ void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
        int cpu, node;
 
        cpu = cpu_logical_map(logical_cpu);
-       node = cpu / NLM_CPUS_PER_NODE;
+       node = nlm_cpuid_to_node(logical_cpu);
        nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
        nlm_next_gp = (unsigned long)task_thread_info(idle);
 
@@ -164,7 +164,7 @@ void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
 void __init nlm_smp_setup(void)
 {
        unsigned int boot_cpu;
-       int num_cpus, i, ncore;
+       int num_cpus, i, ncore, node;
        volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
        char buf[64];
 
@@ -187,6 +187,8 @@ void __init nlm_smp_setup(void)
                        __cpu_number_map[i] = num_cpus;
                        __cpu_logical_map[num_cpus] = i;
                        set_cpu_possible(num_cpus, true);
+                       node = nlm_cpuid_to_node(i);
+                       cpumask_set_cpu(num_cpus, &nlm_get_node(node)->cpumask);
                        ++num_cpus;
                }
        }
index 310d88a82abe8ac06c3dac793c0bdc32f54e1c13..2a39bbeb45b0dd0d2beeb651749d853deaca8138 100644 (file)
@@ -51,6 +51,7 @@ uint64_t nlm_io_base;
 struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
 cpumask_t nlm_cpumask = CPU_MASK_CPU0;
 unsigned int nlm_threads_per_core;
+unsigned int xlp_cores_per_node;
 
 static void nlm_linux_exit(void)
 {
@@ -154,6 +155,10 @@ void __init prom_init(void)
        void *reset_vec;
 
        nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
+       if (cpu_is_xlp9xx())
+               xlp_cores_per_node = 32;
+       else
+               xlp_cores_per_node = 8;
        nlm_init_boot_cpu();
        xlp_mmu_init();
        nlm_node_init(0);
index a5d6647e5fe8030f627f885483036ffb2a6bea1d..bbd53f8e92dbfa4ea8c87d15b3f9cc76cf28d5a6 100644 (file)
@@ -165,7 +165,7 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
                        nodep->coremask = 1;
 
                pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);
-               for (core = 0; core < NLM_CORES_PER_NODE; core++) {
+               for (core = 0; core < nlm_cores_per_node(); core++) {
                        /* we will be on node 0 core 0 */
                        if (n == 0 && core == 0)
                                continue;
@@ -175,7 +175,7 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
                                continue;
 
                        /* see if at least the first hw thread is enabled */
-                       cpu = (n * NLM_CORES_PER_NODE + core)
+                       cpu = (n * nlm_cores_per_node() + core)
                                                * NLM_THREADS_PER_CORE;
                        if (!cpumask_test_cpu(cpu, wakeup_mask))
                                continue;
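A small standalone sketch of the cpu-numbering formula updated above: the first hardware thread of a given core is (node * cores_per_node + core) * NLM_THREADS_PER_CORE. first_thread_of_core() is a hypothetical stand-in, with cores_per_node playing the role of nlm_cores_per_node():

#include <stdio.h>

#define NLM_THREADS_PER_CORE 4

/* Hypothetical stand-in for the expression above: hw cpu id of a core's
 * first thread; cores_per_node plays the role of nlm_cores_per_node(). */
static int first_thread_of_core(int node, int core, int cores_per_node)
{
        return (node * cores_per_node + core) * NLM_THREADS_PER_CORE;
}

int main(void)
{
        printf("XLP8XX: node 1 core 2 -> cpu %d\n", first_thread_of_core(1, 2, 8));  /* 40 */
        printf("XLP9XX: node 1 core 2 -> cpu %d\n", first_thread_of_core(1, 2, 32)); /* 136 */
        return 0;
}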
index 9fb81fa6272a753b805952166dd5e8dc45cb93ee..ec60e7169389d9d64a2a410f7dbf8f4f58183b67 100644 (file)
@@ -70,7 +70,7 @@ int xlr_wakeup_secondary_cpus(void)
 
        /* Fill up the coremask early */
        nodep->coremask = 1;
-       for (i = 1; i < NLM_CORES_PER_NODE; i++) {
+       for (i = 1; i < nlm_cores_per_node(); i++) {
                for (j = 1000000; j > 0; j--) {
                        if (cpu_ready[i * NLM_THREADS_PER_CORE])
                                break;
index 66a244a73a821f7d94593458c4877023a7350ead..afd8405e0188248dbea2dd90e2e8949e25895d67 100644 (file)
@@ -280,7 +280,7 @@ static int xlp_setup_msi(uint64_t lnkbase, int node, int link,
                irt = PIC_IRT_PCIE_LINK_INDEX(link);
                nlm_setup_pic_irq(node, lirq, lirq, irt);
                nlm_pic_init_irt(nlm_get_node(node)->picbase, irt, lirq,
-                                node * NLM_CPUS_PER_NODE, 1 /*en */);
+                                node * nlm_threads_per_node(), 1 /*en */);
        }
 
        /* allocate a MSI vec, and tell the bridge about it */
@@ -443,7 +443,7 @@ void __init xlp_init_node_msi_irqs(int node, int link)
                msixvec = link * XLP_MSIXVEC_PER_LINK + i;
                irt = PIC_IRT_PCIE_MSIX_INDEX(msixvec);
                nlm_pic_init_irt(nodep->picbase, irt, PIC_PCIE_MSIX_IRQ(link),
-                       node * NLM_CPUS_PER_NODE, 1 /* enable */);
+                       node * nlm_threads_per_node(), 1 /* enable */);
 
                /* Initialize MSI-X extended irq space for the link  */
                irq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, i));