/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>

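/*
 * Hotplug error logs handed to queue_hotplug_event() below are processed
 * asynchronously: each one is copied into a pseries_hp_work item and run
 * from the pseries_hp_wq workqueue.
 */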
static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
	struct work_struct work;
	struct pseries_hp_errorlog *errlog;
};

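/*
 * Work area layout for the "ibm,configure-connector" RTAS call.  The
 * caller fills in drc_index (and clears the zero word) before the first
 * call; on a NEXT_PROPERTY return the name offset, property length and
 * property offset locate the returned property within the work area.
 */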
struct cc_workarea {
	__be32 drc_index;
	__be32 zero;
	__be32 name_offset;
	__be32 prop_length;
	__be32 prop_offset;
};

void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);
	if (!prop->name) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
{
	struct device_node *dn;
	const char *name;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (const char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kstrdup(name, GFP_KERNEL);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}

#define COMPLETE	0
#define NEXT_SIBLING	1
#define NEXT_CHILD	2
#define NEXT_PROPERTY	3
#define PREV_PARENT	4
#define MORE_MEMORY	5
#define CALL_AGAIN	-2
#define ERR_CFG_USE	-9003

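/*
 * dlpar_configure_connector() repeatedly calls the
 * "ibm,configure-connector" RTAS function for the given DRC index and
 * builds a device node tree from the returned codes: NEXT_CHILD and
 * NEXT_SIBLING add nodes, NEXT_PROPERTY attaches a property to the most
 * recently added node, PREV_PARENT walks back up a level, and COMPLETE
 * (0) ends the sequence.  On any error the partially built tree is freed
 * and NULL is returned.
 */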
struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	char *data_buf;
	int cc_token;
	int rc = -1;

	cc_token = rtas_token("ibm,configure-connector");
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!data_buf)
		return NULL;

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		/* Since we release the rtas_data_buf lock between configure
		 * connector calls we want to re-populate the rtas_data_buffer
		 * with the contents of the previous call.
		 */
		spin_lock(&rtas_data_buf_lock);

		memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
		rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
		memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

		spin_unlock(&rtas_data_buf_lock);

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			break;

		case CALL_AGAIN:
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			printk(KERN_ERR "Unexpected Error (%d) returned from configure-connector\n",
			       rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	kfree(data_buf);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
{
	int rc;

	dn->parent = parent;

	rc = of_attach_node(dn);
	if (rc) {
		printk(KERN_ERR "Failed to add device node %pOF\n", dn);
		return rc;
	}

	return 0;
}

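/*
 * Recursively detach a dynamically added device node and its children
 * from the device tree, dropping the node's reference so it can be
 * freed once no other users hold references to it.
 */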
int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	child = of_get_next_child(dn, NULL);
	while (child) {
		dlpar_detach_node(child);
		child = of_get_next_child(dn, child);
	}

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	of_node_put(dn);

	return 0;
}

#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

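/*
 * Acquire a dynamic reconfiguration connector: the DRC must currently be
 * unusable; it is then marked usable (allocation-state) and unisolated
 * (isolation-state).  If unisolation fails the allocation is rolled back.
 */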
int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

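/*
 * Release a previously acquired connector: the DRC must currently be
 * present; it is isolated and then marked unusable.  If the allocation
 * change fails the connector is unisolated again.
 */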
int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}

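/*
 * Dispatch a hotplug error log to the appropriate resource handler
 * (memory, CPU or persistent memory), first converting the DRC
 * identifiers from big-endian firmware format to CPU byte order.
 */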
int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	/* pseries error logs are in BE format, convert to cpu type */
	switch (hp_elog->id_type) {
	case PSERIES_HP_ELOG_ID_DRC_COUNT:
		hp_elog->_drc_u.drc_count =
			be32_to_cpu(hp_elog->_drc_u.drc_count);
		break;
	case PSERIES_HP_ELOG_ID_DRC_INDEX:
		hp_elog->_drc_u.drc_index =
			be32_to_cpu(hp_elog->_drc_u.drc_index);
		break;
	case PSERIES_HP_ELOG_ID_DRC_IC:
		hp_elog->_drc_u.ic.count =
			be32_to_cpu(hp_elog->_drc_u.ic.count);
		hp_elog->_drc_u.ic.index =
			be32_to_cpu(hp_elog->_drc_u.ic.index);
	}

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_CPU:
		rc = dlpar_cpu(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_PMEM:
		rc = dlpar_hp_pmem(hp_elog);
		break;

	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}

static void pseries_hp_work_fn(struct work_struct *work)
{
	struct pseries_hp_work *hp_work =
		container_of(work, struct pseries_hp_work, work);

	handle_dlpar_errorlog(hp_work->errlog);

	kfree(hp_work->errlog);
	kfree((void *)work);
}

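/*
 * Queue a hotplug error log for asynchronous handling.  The log is
 * copied since the caller's buffer may be reused before the work item
 * runs.
 */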
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
{
	struct pseries_hp_work *work;
	struct pseries_hp_errorlog *hp_errlog_copy;

	hp_errlog_copy = kmemdup(hp_errlog, sizeof(*hp_errlog), GFP_KERNEL);
	if (!hp_errlog_copy)
		return;

	work = kmalloc(sizeof(struct pseries_hp_work), GFP_KERNEL);
	if (work) {
		INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
		work->errlog = hp_errlog_copy;
		queue_work(pseries_hp_wq, (struct work_struct *)work);
	} else {
		kfree(hp_errlog_copy);
	}
}

static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "memory")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
	} else if (sysfs_streq(arg, "cpu")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
	} else {
		pr_err("Invalid resource specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "add")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
	} else if (sysfs_streq(arg, "remove")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
	} else {
		pr_err("Invalid action specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;
	u32 count, index;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "indexed-count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.ic.count = cpu_to_be32(count);
		hp_elog->_drc_u.ic.index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "index")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
	} else {
		pr_err("Invalid id_type specified.\n");
		return -EINVAL;
	}

	return 0;
}

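/*
 * /sys/kernel/dlpar entry point for user-initiated DLPAR operations.
 * The request is a single line of the form
 *
 *	<resource> <action> <id_type> <id>
 *
 * for example (the index value below is purely illustrative):
 *
 *	echo "memory add count 1" > /sys/kernel/dlpar
 *	echo "cpu remove index 0x1000" > /sys/kernel/dlpar
 */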
static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
			   const char *buf, size_t count)
{
	struct pseries_hp_errorlog hp_elog;
	char *argbuf;
	char *args;
	int rc;

	args = argbuf = kstrdup(buf, GFP_KERNEL);
	if (!argbuf) {
		pr_info("Could not allocate resources for DLPAR operation\n");
		return -ENOMEM;
	}

	/*
	 * Parse out the request from the user, this will be in the form:
	 * <resource> <action> <id_type> <id>
	 */
	rc = dlpar_parse_resource(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_action(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_id_type(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = handle_dlpar_errorlog(&hp_elog);

dlpar_store_out:
	kfree(argbuf);

	if (rc)
		pr_err("Could not handle DLPAR request \"%s\"\n", buf);

	return rc ? rc : count;
}

static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%s\n", "memory,cpu");
}

static CLASS_ATTR_RW(dlpar);

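/*
 * Create the hotplug workqueue if it does not already exist; callers
 * from different init paths can therefore invoke this safely more than
 * once.  The workqueue is unbound and limited to a single active work
 * item.
 */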
int __init dlpar_workqueue_init(void)
{
	if (pseries_hp_wq)
		return 0;

	pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
					WQ_UNBOUND, 1);

	return pseries_hp_wq ? 0 : -ENOMEM;
}

static int __init dlpar_sysfs_init(void)
{
	int rc;

	rc = dlpar_workqueue_init();
	if (rc)
		return rc;

	return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, dlpar_sysfs_init);