#include <asm/cpu.h>
#define SCLP_CHP_INFO_MASK_SIZE 32
+#define SCLP_MAX_CORES 256
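/* A core id is a u8 (see sclp_core_entry below), so 256 entries are enough to describe every addressable core. */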
struct sclp_chp_info {
u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
char loadparm[LOADPARM_LEN];
};
-struct sclp_cpu_entry {
+struct sclp_core_entry {
u8 core_id;
u8 reserved0[2];
u8 : 3;
u8 reserved1;
} __attribute__((packed));
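/* sclp_core_entry also carries a per-core type byte, which the SMP code below compares against boot_core_type. */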
-struct sclp_cpu_info {
+struct sclp_core_info {
unsigned int configured;
unsigned int standby;
unsigned int combined;
- int has_cpu_type;
- struct sclp_cpu_entry cpu[MAX_CPU_ADDRESS + 1];
+ struct sclp_core_entry core[SCLP_MAX_CORES];
};
struct sclp_info {
unsigned char has_vt220 : 1;
unsigned char has_siif : 1;
unsigned char has_sigpif : 1;
- unsigned char has_cpu_type : 1;
+ unsigned char has_core_type : 1;
unsigned char has_sprp : 1;
unsigned int ibc;
unsigned int mtid;
unsigned long long rzm;
unsigned long long rnmax;
unsigned long long hamax;
- unsigned int max_cpu;
+ unsigned int max_cores;
unsigned long hsa_size;
unsigned long long facilities;
};
extern struct sclp_info sclp;
-int sclp_get_cpu_info(struct sclp_cpu_info *info);
-int sclp_cpu_configure(u8 cpu);
-int sclp_cpu_deconfigure(u8 cpu);
+int sclp_get_core_info(struct sclp_core_info *info);
+int sclp_core_configure(u8 core);
+int sclp_core_deconfigure(u8 core);
int sclp_sdias_blk_count(void);
int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
int sclp_chp_configure(struct chp_id chpid);
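/*
 * A minimal usage sketch of the renamed interface, not part of this header:
 * it assumes kzalloc()/kfree() from <linux/slab.h>, and the helper name is
 * made up for illustration. It configures the first standby core, relying on
 * the standby entries following the configured ones in core[] (the layout
 * sclp_fill_core_info() below produces).
 */
static inline int sclp_example_configure_first_standby_core(void)
{
	struct sclp_core_info *info;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	rc = sclp_get_core_info(info);
	if (!rc && !info->standby)
		rc = -ENODEV;	/* no standby core to configure */
	if (!rc)
		rc = sclp_core_configure(info->core[info->configured].core_id);
	kfree(info);
	return rc;
}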
u16 address; /* physical cpu address */
};
-static u8 boot_cpu_type;
+static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];
unsigned int smp_cpu_mt_shift;
* old system. The ELF sections are picked up by the crash_dump code
* via elfcorehdr_addr.
*/
-static void __init smp_store_cpu_states(struct sclp_cpu_info *info)
+static void __init smp_store_cpu_states(struct sclp_core_info *info)
{
unsigned int cpu, address, i, j;
int is_boot_cpu;
cpu = 0;
for (i = 0; i < info->configured; i++) {
/* Skip CPUs with different CPU type. */
- if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
+ if (sclp.has_core_type && info->core[i].type != boot_core_type)
continue;
for (j = 0; j <= smp_cpu_mtid; j++, cpu++) {
- address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j;
+ address = (info->core[i].core_id << smp_cpu_mt_shift) + j;
is_boot_cpu = (address == pcpu_devices[0].address);
if (is_boot_cpu && !OLDMEM_BASE)
/* Skip boot CPU for standard zfcp dump. */
return pcpu_devices[cpu].polarization;
}
-static struct sclp_cpu_info *smp_get_cpu_info(void)
+static struct sclp_core_info *smp_get_core_info(void)
{
static int use_sigp_detection;
- struct sclp_cpu_info *info;
+ struct sclp_core_info *info;
int address;
info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
+ if (info && (use_sigp_detection || sclp_get_core_info(info))) {
use_sigp_detection = 1;
for (address = 0;
- address <= (MAX_CPU_ADDRESS << smp_cpu_mt_shift);
+ address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
address += (1U << smp_cpu_mt_shift)) {
if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
SIGP_CC_NOT_OPERATIONAL)
continue;
- info->cpu[info->configured].core_id =
+ info->core[info->configured].core_id =
address >> smp_cpu_mt_shift;
info->configured++;
}
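/*
 * Address arithmetic used above and below: a logical CPU address is the SCLP
 * core_id shifted left by smp_cpu_mt_shift plus a thread id in the range
 * 0..smp_cpu_mtid. For example, with smp_cpu_mt_shift == 1 and
 * smp_cpu_mtid == 1, core_id 3 maps to the CPU addresses (3 << 1) + 0 == 6
 * and (3 << 1) + 1 == 7.
 */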
static int smp_add_present_cpu(int cpu);
-static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
+static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
{
struct pcpu *pcpu;
cpumask_t avail;
cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
cpu = cpumask_first(&avail);
for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
- if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
+ if (sclp.has_core_type && info->core[i].type != boot_core_type)
continue;
- address = info->cpu[i].core_id << smp_cpu_mt_shift;
+ address = info->core[i].core_id << smp_cpu_mt_shift;
for (j = 0; j <= smp_cpu_mtid; j++) {
if (pcpu_find_address(cpu_present_mask, address + j))
continue;
static void __init smp_detect_cpus(void)
{
unsigned int cpu, mtid, c_cpus, s_cpus;
- struct sclp_cpu_info *info;
+ struct sclp_core_info *info;
u16 address;
/* Get CPU information */
- info = smp_get_cpu_info();
+ info = smp_get_core_info();
if (!info)
panic("smp_detect_cpus failed to allocate memory\n");
/* Find boot CPU type */
- if (info->has_cpu_type) {
+ if (sclp.has_core_type) {
address = stap();
for (cpu = 0; cpu < info->combined; cpu++)
- if (info->cpu[cpu].core_id == address) {
+ if (info->core[cpu].core_id == address) {
/* The boot cpu dictates the cpu type. */
- boot_cpu_type = info->cpu[cpu].type;
+ boot_core_type = info->core[cpu].type;
break;
}
if (cpu >= info->combined)
#endif
/* Set multi-threading state for the current system */
- mtid = boot_cpu_type ? sclp.mtid : sclp.mtid_cp;
+ mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
pcpu_set_smt(mtid);
/* Print number of CPUs */
c_cpus = s_cpus = 0;
for (cpu = 0; cpu < info->combined; cpu++) {
- if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
+ if (sclp.has_core_type &&
+ info->core[cpu].type != boot_core_type)
continue;
if (cpu < info->configured)
c_cpus += smp_cpu_mtid + 1;
sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
sclp_max = min(smp_max_threads, sclp_max);
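/* If SCLP reports no core limit (max_cores == 0), the product below is 0 and the GNU ?: falls back to nr_cpu_ids. */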
- sclp_max = sclp.max_cpu * sclp_max ?: nr_cpu_ids;
+ sclp_max = sclp.max_cores * sclp_max ?: nr_cpu_ids;
possible = setup_possible_cpus ?: nr_cpu_ids;
possible = min(possible, sclp_max);
for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
case 0:
if (pcpu->state != CPU_STATE_CONFIGURED)
break;
- rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift);
+ rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
if (rc)
break;
for (i = 0; i <= smp_cpu_mtid; i++) {
case 1:
if (pcpu->state != CPU_STATE_STANDBY)
break;
- rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift);
+ rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
if (rc)
break;
for (i = 0; i <= smp_cpu_mtid; i++) {
int __ref smp_rescan_cpus(void)
{
- struct sclp_cpu_info *info;
+ struct sclp_core_info *info;
int nr;
- info = smp_get_cpu_info();
+ info = smp_get_core_info();
if (!info)
return -ENOMEM;
get_online_cpus();
u8 reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));
-static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
- struct read_cpu_info_sccb *sccb)
+static void sclp_fill_core_info(struct sclp_core_info *info,
+ struct read_cpu_info_sccb *sccb)
{
char *page = (char *) sccb;
info->configured = sccb->nr_configured;
info->standby = sccb->nr_standby;
info->combined = sccb->nr_configured + sccb->nr_standby;
- info->has_cpu_type = sclp.has_cpu_type;
- memcpy(&info->cpu, page + sccb->offset_configured,
- info->combined * sizeof(struct sclp_cpu_entry));
+ memcpy(&info->core, page + sccb->offset_configured,
+ info->combined * sizeof(struct sclp_core_entry));
}
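/*
 * The single memcpy above relies on the SCCB placing the standby entries
 * directly behind the configured ones, so offset_configured is a valid
 * starting point for all info->combined entries.
 */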
-int sclp_get_cpu_info(struct sclp_cpu_info *info)
+int sclp_get_core_info(struct sclp_core_info *info)
{
int rc;
struct read_cpu_info_sccb *sccb;
rc = -EIO;
goto out;
}
- sclp_fill_cpu_info(info, sccb);
+ sclp_fill_core_info(info, sccb);
out:
free_page((unsigned long) sccb);
return rc;
struct sccb_header header;
} __attribute__((packed, aligned(8)));
-static int do_cpu_configure(sclp_cmdw_t cmd)
+static int do_core_configure(sclp_cmdw_t cmd)
{
struct cpu_configure_sccb *sccb;
int rc;
return rc;
}
-int sclp_cpu_configure(u8 cpu)
+int sclp_core_configure(u8 core)
{
- return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
+ return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}
-int sclp_cpu_deconfigure(u8 cpu)
+int sclp_core_deconfigure(u8 core)
{
- return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
+ return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}
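/*
 * The core id is merged into the SCLP command word shifted left by 8, so
 * e.g. deconfiguring core 5 issues SCLP_CMDW_DECONFIGURE_CPU | (5 << 8).
 */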
#ifdef CONFIG_MEMORY_HOTPLUG