 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */
#define pr_fmt(fmt)	"dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/uaccess.h>

#include <asm/machdep.h>

#include "of_helpers.h"
/* Single-threaded workqueue on which all hotplug requests are serialized. */
static struct workqueue_struct *pseries_hp_wq;
32 struct pseries_hp_work
{
33 struct work_struct work
;
34 struct pseries_hp_errorlog
*errlog
;
35 struct completion
*hp_completion
;
47 void dlpar_free_cc_property(struct property
*prop
)
54 static struct property
*dlpar_parse_cc_property(struct cc_workarea
*ccwa
)
56 struct property
*prop
;
60 prop
= kzalloc(sizeof(*prop
), GFP_KERNEL
);
64 name
= (char *)ccwa
+ be32_to_cpu(ccwa
->name_offset
);
65 prop
->name
= kstrdup(name
, GFP_KERNEL
);
67 prop
->length
= be32_to_cpu(ccwa
->prop_length
);
68 value
= (char *)ccwa
+ be32_to_cpu(ccwa
->prop_offset
);
69 prop
->value
= kmemdup(value
, prop
->length
, GFP_KERNEL
);
71 dlpar_free_cc_property(prop
);
78 static struct device_node
*dlpar_parse_cc_node(struct cc_workarea
*ccwa
,
81 struct device_node
*dn
;
84 /* If parent node path is "/" advance path to NULL terminator to
85 * prevent double leading slashs in full_name.
90 dn
= kzalloc(sizeof(*dn
), GFP_KERNEL
);
94 name
= (char *)ccwa
+ be32_to_cpu(ccwa
->name_offset
);
95 dn
->full_name
= kasprintf(GFP_KERNEL
, "%s/%s", path
, name
);
101 of_node_set_flag(dn
, OF_DYNAMIC
);
107 static void dlpar_free_one_cc_node(struct device_node
*dn
)
109 struct property
*prop
;
111 while (dn
->properties
) {
112 prop
= dn
->properties
;
113 dn
->properties
= prop
->next
;
114 dlpar_free_cc_property(prop
);
117 kfree(dn
->full_name
);
121 void dlpar_free_cc_nodes(struct device_node
*dn
)
124 dlpar_free_cc_nodes(dn
->child
);
127 dlpar_free_cc_nodes(dn
->sibling
);
129 dlpar_free_one_cc_node(dn
);
/* Return codes from the ibm,configure-connector RTAS call (PAPR). */
#define COMPLETE	0	/* configuration finished */
#define NEXT_SIBLING	1	/* work area holds a sibling node */
#define NEXT_CHILD	2	/* work area holds a child node */
#define NEXT_PROPERTY	3	/* work area holds a property */
#define PREV_PARENT	4	/* pop back up to the parent node */
#define MORE_MEMORY	5	/* RTAS needs a larger work area */
#define CALL_AGAIN	-2	/* busy; retry the call */
#define ERR_CFG_USE	-9003	/* connector is in use elsewhere */
141 struct device_node
*dlpar_configure_connector(__be32 drc_index
,
142 struct device_node
*parent
)
144 struct device_node
*dn
;
145 struct device_node
*first_dn
= NULL
;
146 struct device_node
*last_dn
= NULL
;
147 struct property
*property
;
148 struct property
*last_property
= NULL
;
149 struct cc_workarea
*ccwa
;
151 const char *parent_path
= parent
->full_name
;
155 cc_token
= rtas_token("ibm,configure-connector");
156 if (cc_token
== RTAS_UNKNOWN_SERVICE
)
159 data_buf
= kzalloc(RTAS_DATA_BUF_SIZE
, GFP_KERNEL
);
163 ccwa
= (struct cc_workarea
*)&data_buf
[0];
164 ccwa
->drc_index
= drc_index
;
168 /* Since we release the rtas_data_buf lock between configure
169 * connector calls we want to re-populate the rtas_data_buffer
170 * with the contents of the previous call.
172 spin_lock(&rtas_data_buf_lock
);
174 memcpy(rtas_data_buf
, data_buf
, RTAS_DATA_BUF_SIZE
);
175 rc
= rtas_call(cc_token
, 2, 1, NULL
, rtas_data_buf
, NULL
);
176 memcpy(data_buf
, rtas_data_buf
, RTAS_DATA_BUF_SIZE
);
178 spin_unlock(&rtas_data_buf_lock
);
185 dn
= dlpar_parse_cc_node(ccwa
, parent_path
);
189 dn
->parent
= last_dn
->parent
;
190 last_dn
->sibling
= dn
;
196 parent_path
= last_dn
->full_name
;
198 dn
= dlpar_parse_cc_node(ccwa
, parent_path
);
206 dn
->parent
= last_dn
;
215 property
= dlpar_parse_cc_property(ccwa
);
219 if (!last_dn
->properties
)
220 last_dn
->properties
= property
;
222 last_property
->next
= property
;
224 last_property
= property
;
228 last_dn
= last_dn
->parent
;
229 parent_path
= last_dn
->parent
->full_name
;
238 printk(KERN_ERR
"Unexpected Error (%d) "
239 "returned from configure-connector\n", rc
);
249 dlpar_free_cc_nodes(first_dn
);
257 int dlpar_attach_node(struct device_node
*dn
, struct device_node
*parent
)
263 rc
= of_attach_node(dn
);
265 printk(KERN_ERR
"Failed to add device node %pOF\n", dn
);
269 of_node_put(dn
->parent
);
273 int dlpar_detach_node(struct device_node
*dn
)
275 struct device_node
*child
;
278 child
= of_get_next_child(dn
, NULL
);
280 dlpar_detach_node(child
);
281 child
= of_get_next_child(dn
, child
);
284 rc
= of_detach_node(dn
);
/* RTAS sensor/indicator tokens and values for DRC state management (PAPR). */
#define DR_ENTITY_SENSE		9003	/* get-sensor-state: DR entity sense */
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003	/* set-indicator: allocation state */
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001	/* set-indicator: isolation state */
#define ISOLATE			0
#define UNISOLATE		1
301 int dlpar_acquire_drc(u32 drc_index
)
305 rc
= rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status
,
306 DR_ENTITY_SENSE
, drc_index
);
307 if (rc
|| dr_status
!= DR_ENTITY_UNUSABLE
)
310 rc
= rtas_set_indicator(ALLOCATION_STATE
, drc_index
, ALLOC_USABLE
);
314 rc
= rtas_set_indicator(ISOLATION_STATE
, drc_index
, UNISOLATE
);
316 rtas_set_indicator(ALLOCATION_STATE
, drc_index
, ALLOC_UNUSABLE
);
323 int dlpar_release_drc(u32 drc_index
)
327 rc
= rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status
,
328 DR_ENTITY_SENSE
, drc_index
);
329 if (rc
|| dr_status
!= DR_ENTITY_PRESENT
)
332 rc
= rtas_set_indicator(ISOLATION_STATE
, drc_index
, ISOLATE
);
336 rc
= rtas_set_indicator(ALLOCATION_STATE
, drc_index
, ALLOC_UNUSABLE
);
338 rtas_set_indicator(ISOLATION_STATE
, drc_index
, UNISOLATE
);
345 static int handle_dlpar_errorlog(struct pseries_hp_errorlog
*hp_elog
)
349 /* pseries error logs are in BE format, convert to cpu type */
350 switch (hp_elog
->id_type
) {
351 case PSERIES_HP_ELOG_ID_DRC_COUNT
:
352 hp_elog
->_drc_u
.drc_count
=
353 be32_to_cpu(hp_elog
->_drc_u
.drc_count
);
355 case PSERIES_HP_ELOG_ID_DRC_INDEX
:
356 hp_elog
->_drc_u
.drc_index
=
357 be32_to_cpu(hp_elog
->_drc_u
.drc_index
);
359 case PSERIES_HP_ELOG_ID_DRC_IC
:
360 hp_elog
->_drc_u
.ic
.count
=
361 be32_to_cpu(hp_elog
->_drc_u
.ic
.count
);
362 hp_elog
->_drc_u
.ic
.index
=
363 be32_to_cpu(hp_elog
->_drc_u
.ic
.index
);
366 switch (hp_elog
->resource
) {
367 case PSERIES_HP_ELOG_RESOURCE_MEM
:
368 rc
= dlpar_memory(hp_elog
);
370 case PSERIES_HP_ELOG_RESOURCE_CPU
:
371 rc
= dlpar_cpu(hp_elog
);
374 pr_warn_ratelimited("Invalid resource (%d) specified\n",
382 static void pseries_hp_work_fn(struct work_struct
*work
)
384 struct pseries_hp_work
*hp_work
=
385 container_of(work
, struct pseries_hp_work
, work
);
388 *(hp_work
->rc
) = handle_dlpar_errorlog(hp_work
->errlog
);
390 handle_dlpar_errorlog(hp_work
->errlog
);
392 if (hp_work
->hp_completion
)
393 complete(hp_work
->hp_completion
);
395 kfree(hp_work
->errlog
);
399 void queue_hotplug_event(struct pseries_hp_errorlog
*hp_errlog
,
400 struct completion
*hotplug_done
, int *rc
)
402 struct pseries_hp_work
*work
;
403 struct pseries_hp_errorlog
*hp_errlog_copy
;
405 hp_errlog_copy
= kmalloc(sizeof(struct pseries_hp_errorlog
),
407 memcpy(hp_errlog_copy
, hp_errlog
, sizeof(struct pseries_hp_errorlog
));
409 work
= kmalloc(sizeof(struct pseries_hp_work
), GFP_KERNEL
);
411 INIT_WORK((struct work_struct
*)work
, pseries_hp_work_fn
);
412 work
->errlog
= hp_errlog_copy
;
413 work
->hp_completion
= hotplug_done
;
415 queue_work(pseries_hp_wq
, (struct work_struct
*)work
);
418 kfree(hp_errlog_copy
);
419 complete(hotplug_done
);
423 static int dlpar_parse_resource(char **cmd
, struct pseries_hp_errorlog
*hp_elog
)
427 arg
= strsep(cmd
, " ");
431 if (sysfs_streq(arg
, "memory")) {
432 hp_elog
->resource
= PSERIES_HP_ELOG_RESOURCE_MEM
;
433 } else if (sysfs_streq(arg
, "cpu")) {
434 hp_elog
->resource
= PSERIES_HP_ELOG_RESOURCE_CPU
;
436 pr_err("Invalid resource specified.\n");
443 static int dlpar_parse_action(char **cmd
, struct pseries_hp_errorlog
*hp_elog
)
447 arg
= strsep(cmd
, " ");
451 if (sysfs_streq(arg
, "add")) {
452 hp_elog
->action
= PSERIES_HP_ELOG_ACTION_ADD
;
453 } else if (sysfs_streq(arg
, "remove")) {
454 hp_elog
->action
= PSERIES_HP_ELOG_ACTION_REMOVE
;
456 pr_err("Invalid action specified.\n");
463 static int dlpar_parse_id_type(char **cmd
, struct pseries_hp_errorlog
*hp_elog
)
468 arg
= strsep(cmd
, " ");
472 if (sysfs_streq(arg
, "indexed-count")) {
473 hp_elog
->id_type
= PSERIES_HP_ELOG_ID_DRC_IC
;
474 arg
= strsep(cmd
, " ");
476 pr_err("No DRC count specified.\n");
480 if (kstrtou32(arg
, 0, &count
)) {
481 pr_err("Invalid DRC count specified.\n");
485 arg
= strsep(cmd
, " ");
487 pr_err("No DRC Index specified.\n");
491 if (kstrtou32(arg
, 0, &index
)) {
492 pr_err("Invalid DRC Index specified.\n");
496 hp_elog
->_drc_u
.ic
.count
= cpu_to_be32(count
);
497 hp_elog
->_drc_u
.ic
.index
= cpu_to_be32(index
);
498 } else if (sysfs_streq(arg
, "index")) {
499 hp_elog
->id_type
= PSERIES_HP_ELOG_ID_DRC_INDEX
;
500 arg
= strsep(cmd
, " ");
502 pr_err("No DRC Index specified.\n");
506 if (kstrtou32(arg
, 0, &index
)) {
507 pr_err("Invalid DRC Index specified.\n");
511 hp_elog
->_drc_u
.drc_index
= cpu_to_be32(index
);
512 } else if (sysfs_streq(arg
, "count")) {
513 hp_elog
->id_type
= PSERIES_HP_ELOG_ID_DRC_COUNT
;
514 arg
= strsep(cmd
, " ");
516 pr_err("No DRC count specified.\n");
520 if (kstrtou32(arg
, 0, &count
)) {
521 pr_err("Invalid DRC count specified.\n");
525 hp_elog
->_drc_u
.drc_count
= cpu_to_be32(count
);
527 pr_err("Invalid id_type specified.\n");
534 static ssize_t
dlpar_store(struct class *class, struct class_attribute
*attr
,
535 const char *buf
, size_t count
)
537 struct pseries_hp_errorlog
*hp_elog
;
538 struct completion hotplug_done
;
543 args
= argbuf
= kstrdup(buf
, GFP_KERNEL
);
544 hp_elog
= kzalloc(sizeof(*hp_elog
), GFP_KERNEL
);
545 if (!hp_elog
|| !argbuf
) {
546 pr_info("Could not allocate resources for DLPAR operation\n");
553 * Parse out the request from the user, this will be in the form:
554 * <resource> <action> <id_type> <id>
556 rc
= dlpar_parse_resource(&args
, hp_elog
);
558 goto dlpar_store_out
;
560 rc
= dlpar_parse_action(&args
, hp_elog
);
562 goto dlpar_store_out
;
564 rc
= dlpar_parse_id_type(&args
, hp_elog
);
566 goto dlpar_store_out
;
568 init_completion(&hotplug_done
);
569 queue_hotplug_event(hp_elog
, &hotplug_done
, &rc
);
570 wait_for_completion(&hotplug_done
);
577 pr_err("Could not handle DLPAR request \"%s\"\n", buf
);
579 return rc
? rc
: count
;
582 static ssize_t
dlpar_show(struct class *class, struct class_attribute
*attr
,
585 return sprintf(buf
, "%s\n", "memory,cpu");
/* Defines class_attr_dlpar, wiring dlpar_show/dlpar_store to the attribute. */
static CLASS_ATTR_RW(dlpar);
590 static int __init
pseries_dlpar_init(void)
592 pseries_hp_wq
= alloc_workqueue("pseries hotplug workqueue",
594 return sysfs_create_file(kernel_kobj
, &class_attr_dlpar
.attr
);
596 machine_device_initcall(pseries
, pseries_dlpar_init
);