/*
 * arch/powerpc/platforms/pseries/dlpar.c
 *
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>
#include "offline_states.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>

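/*
 * Work area layout for the ibm,configure-connector RTAS call.  The node
 * name and property data returned by firmware are located at the given
 * byte offsets from the start of the work area.
 */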
struct cc_workarea {
        u32     drc_index;
        u32     zero;
        u32     name_offset;
        u32     prop_length;
        u32     prop_offset;
};

void dlpar_free_cc_property(struct property *prop)
{
        kfree(prop->name);
        kfree(prop->value);
        kfree(prop);
}

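/* Build a struct property from the name and value data in the work area. */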
static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
        struct property *prop;
        char *name;
        char *value;

        prop = kzalloc(sizeof(*prop), GFP_KERNEL);
        if (!prop)
                return NULL;

        name = (char *)ccwa + ccwa->name_offset;
        prop->name = kstrdup(name, GFP_KERNEL);
        if (!prop->name) {
                dlpar_free_cc_property(prop);
                return NULL;
        }

        prop->length = ccwa->prop_length;
        value = (char *)ccwa + ccwa->prop_offset;
        prop->value = kmemdup(value, prop->length, GFP_KERNEL);
        if (!prop->value) {
                dlpar_free_cc_property(prop);
                return NULL;
        }

        return prop;
}

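/*
 * Allocate a device node for the name returned in the work area and
 * construct its full path beneath the given parent path.
 */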
static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
                                               const char *path)
{
        struct device_node *dn;
        char *name;

        /* If the parent node path is "/", advance path to its terminating
         * NUL so that full_name does not get a double leading slash.
         */
        if (!path[1])
                path++;

        dn = kzalloc(sizeof(*dn), GFP_KERNEL);
        if (!dn)
                return NULL;

        name = (char *)ccwa + ccwa->name_offset;
        dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
        if (!dn->full_name) {
                kfree(dn);
                return NULL;
        }

        of_node_set_flag(dn, OF_DYNAMIC);

        return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
        struct property *prop;

        while (dn->properties) {
                prop = dn->properties;
                dn->properties = prop->next;
                dlpar_free_cc_property(prop);
        }

        kfree(dn->full_name);
        kfree(dn);
}

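/* Recursively free a device node tree built by dlpar_configure_connector(). */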
void dlpar_free_cc_nodes(struct device_node *dn)
{
        if (dn->child)
                dlpar_free_cc_nodes(dn->child);

        if (dn->sibling)
                dlpar_free_cc_nodes(dn->sibling);

        dlpar_free_one_cc_node(dn);
}

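/* Status values returned by the ibm,configure-connector RTAS call */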
#define COMPLETE        0
#define NEXT_SIBLING    1
#define NEXT_CHILD      2
#define NEXT_PROPERTY   3
#define PREV_PARENT     4
#define MORE_MEMORY     5
#define CALL_AGAIN      -2
#define ERR_CFG_USE     -9003

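/*
 * Call the ibm,configure-connector RTAS service for the given DRC index
 * and build a device node tree describing the resources it returns.  The
 * caller is responsible for attaching the returned tree with
 * dlpar_attach_node() or freeing it with dlpar_free_cc_nodes().
 */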
struct device_node *dlpar_configure_connector(u32 drc_index,
                                              struct device_node *parent)
{
        struct device_node *dn;
        struct device_node *first_dn = NULL;
        struct device_node *last_dn = NULL;
        struct property *property;
        struct property *last_property = NULL;
        struct cc_workarea *ccwa;
        char *data_buf;
        const char *parent_path = parent->full_name;
        int cc_token;
        int rc = -1;

        cc_token = rtas_token("ibm,configure-connector");
        if (cc_token == RTAS_UNKNOWN_SERVICE)
                return NULL;

        data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
        if (!data_buf)
                return NULL;

        ccwa = (struct cc_workarea *)&data_buf[0];
        ccwa->drc_index = drc_index;
        ccwa->zero = 0;
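
        /*
         * Call configure-connector until it reports COMPLETE.  Each return
         * status tells us how to extend the tree (add a child, a sibling,
         * or a property, or move back to the parent) from the contents of
         * the work area.
         */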
        do {
                /* Since we release the rtas_data_buf lock between
                 * configure-connector calls we want to re-populate the
                 * rtas_data_buf with the contents of the previous call.
                 */
                spin_lock(&rtas_data_buf_lock);

                memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
                rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
                memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

                spin_unlock(&rtas_data_buf_lock);

                switch (rc) {
                case COMPLETE:
                        break;

                case NEXT_SIBLING:
                        dn = dlpar_parse_cc_node(ccwa, parent_path);
                        if (!dn)
                                goto cc_error;

                        dn->parent = last_dn->parent;
                        last_dn->sibling = dn;
                        last_dn = dn;
                        break;

                case NEXT_CHILD:
                        if (first_dn)
                                parent_path = last_dn->full_name;

                        dn = dlpar_parse_cc_node(ccwa, parent_path);
                        if (!dn)
                                goto cc_error;

                        if (!first_dn) {
                                dn->parent = parent;
                                first_dn = dn;
                        } else {
                                dn->parent = last_dn;
                                if (last_dn)
                                        last_dn->child = dn;
                        }

                        last_dn = dn;
                        break;

                case NEXT_PROPERTY:
                        property = dlpar_parse_cc_property(ccwa);
                        if (!property)
                                goto cc_error;

                        if (!last_dn->properties)
                                last_dn->properties = property;
                        else
                                last_property->next = property;

                        last_property = property;
                        break;

                case PREV_PARENT:
                        last_dn = last_dn->parent;
                        parent_path = last_dn->parent->full_name;
                        break;

                case CALL_AGAIN:
                        break;

                case MORE_MEMORY:
                case ERR_CFG_USE:
                default:
                        printk(KERN_ERR "Unexpected Error (%d) "
                               "returned from configure-connector\n", rc);
                        goto cc_error;
                }
        } while (rc);

cc_error:
        kfree(data_buf);

        if (rc) {
                if (first_dn)
                        dlpar_free_cc_nodes(first_dn);

                return NULL;
        }

        return first_dn;
}

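/* Find the parent of a node path by trimming its last path component. */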
static struct device_node *derive_parent(const char *path)
{
        struct device_node *parent;
        char *last_slash;

        last_slash = strrchr(path, '/');
        if (last_slash == path) {
                parent = of_find_node_by_path("/");
        } else {
                char *parent_path;
                int parent_path_len = last_slash - path + 1;
                parent_path = kmalloc(parent_path_len, GFP_KERNEL);
                if (!parent_path)
                        return NULL;

                strlcpy(parent_path, path, parent_path_len);
                parent = of_find_node_by_path(parent_path);
                kfree(parent_path);
        }

        return parent;
}

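/* Attach a node built by dlpar_configure_connector() to the device tree. */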
int dlpar_attach_node(struct device_node *dn)
{
        int rc;

        dn->parent = derive_parent(dn->full_name);
        if (!dn->parent)
                return -ENOMEM;

        rc = of_attach_node(dn);
        if (rc) {
                printk(KERN_ERR "Failed to add device node %s\n",
                       dn->full_name);
                of_node_put(dn->parent);
                return rc;
        }

        of_node_put(dn->parent);
        return 0;
}

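/* Detach a device node and all of its children from the device tree. */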
int dlpar_detach_node(struct device_node *dn)
{
        struct device_node *child;
        int rc;

        child = of_get_next_child(dn, NULL);
        while (child) {
                dlpar_detach_node(child);
                child = of_get_next_child(dn, child);
        }

        rc = of_detach_node(dn);
        if (rc)
                return rc;

        of_node_put(dn); /* Must decrement the refcount */
        return 0;
}

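/* RTAS sensor and indicator tokens used to acquire and release connectors */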
#define DR_ENTITY_SENSE         9003
#define DR_ENTITY_PRESENT       1
#define DR_ENTITY_UNUSABLE      2
#define ALLOCATION_STATE        9003
#define ALLOC_UNUSABLE          0
#define ALLOC_USABLE            1
#define ISOLATION_STATE         9001
#define ISOLATE                 0
#define UNISOLATE               1

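/*
 * Acquire a DR connector: check that the entity is currently unusable
 * (owned by firmware), then mark its allocation usable and unisolate it
 * so the partition owns the underlying resource.
 */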
int dlpar_acquire_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
                       DR_ENTITY_SENSE, drc_index);
        if (rc || dr_status != DR_ENTITY_UNUSABLE)
                return -1;

        rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
        if (rc)
                return rc;

        rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
        if (rc) {
                rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
                return rc;
        }

        return 0;
}

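/*
 * Release a DR connector: the reverse of dlpar_acquire_drc().  The entity
 * must be present; it is isolated and its allocation returned to firmware.
 */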
int dlpar_release_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
                       DR_ENTITY_SENSE, drc_index);
        if (rc || dr_status != DR_ENTITY_PRESENT)
                return -1;

        rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
        if (rc)
                return rc;

        rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
        if (rc) {
                rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
                return rc;
        }

        return 0;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

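/*
 * Bring every hardware thread of the added cpu node online.  Threads are
 * matched to logical cpus by their interrupt server numbers.
 */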
static int dlpar_online_cpu(struct device_node *dn)
{
        int rc = 0;
        unsigned int cpu;
        int len, nthreads, i;
        const u32 *intserv;

        intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
        if (!intserv)
                return -EINVAL;

        nthreads = len / sizeof(u32);

        cpu_maps_update_begin();
        for (i = 0; i < nthreads; i++) {
                for_each_present_cpu(cpu) {
                        if (get_hard_smp_processor_id(cpu) != intserv[i])
                                continue;
                        BUG_ON(get_cpu_current_state(cpu)
                                        != CPU_STATE_OFFLINE);
                        cpu_maps_update_done();
                        rc = cpu_up(cpu);
                        if (rc)
                                goto out;
                        cpu_maps_update_begin();

                        break;
                }
                if (cpu == num_possible_cpus())
                        printk(KERN_WARNING "Could not find cpu to online "
                               "with physical id 0x%x\n", intserv[i]);
        }
        cpu_maps_update_done();

out:
        return rc;
}

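/*
 * Add a cpu in response to a probe request.  The buffer contains the drc
 * index of the cpu to add: build its device tree nodes via
 * configure-connector, acquire the connector, attach the nodes, and bring
 * the new threads online.
 */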
static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
        struct device_node *dn, *parent;
        unsigned long drc_index;
        int rc;

        rc = strict_strtoul(buf, 0, &drc_index);
        if (rc)
                return -EINVAL;

        parent = of_find_node_by_path("/cpus");
        if (!parent)
                return -ENODEV;

        dn = dlpar_configure_connector(drc_index, parent);
        of_node_put(parent);
        if (!dn)
                return -EINVAL;

        rc = dlpar_acquire_drc(drc_index);
        if (rc) {
                dlpar_free_cc_nodes(dn);
                return -EINVAL;
        }

        rc = dlpar_attach_node(dn);
        if (rc) {
                dlpar_release_drc(drc_index);
                dlpar_free_cc_nodes(dn);
                return rc;
        }

        rc = dlpar_online_cpu(dn);
        if (rc)
                return rc;

        return count;
}

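/*
 * Take all hardware threads of the cpu node offline before it is released.
 * Threads that are already inactive (ceded to the hypervisor) are prodded
 * so that they move to the fully offline state.
 */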
static int dlpar_offline_cpu(struct device_node *dn)
{
        int rc = 0;
        unsigned int cpu;
        int len, nthreads, i;
        const u32 *intserv;

        intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
        if (!intserv)
                return -EINVAL;

        nthreads = len / sizeof(u32);

        cpu_maps_update_begin();
        for (i = 0; i < nthreads; i++) {
                for_each_present_cpu(cpu) {
                        if (get_hard_smp_processor_id(cpu) != intserv[i])
                                continue;

                        if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
                                break;

                        if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
                                set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
                                cpu_maps_update_done();
                                rc = cpu_down(cpu);
                                if (rc)
                                        goto out;
                                cpu_maps_update_begin();
                                break;
                        }

                        /*
                         * The cpu is in CPU_STATE_INACTIVE.
                         * Upgrade its state to CPU_STATE_OFFLINE.
                         */
                        set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
                        BUG_ON(plpar_hcall_norets(H_PROD, intserv[i])
                                                                != H_SUCCESS);
                        __cpu_die(cpu);
                        break;
                }
                if (cpu == num_possible_cpus())
                        printk(KERN_WARNING "Could not find cpu to offline "
                               "with physical id 0x%x\n", intserv[i]);
        }
        cpu_maps_update_done();

out:
        return rc;
}

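/*
 * Remove a cpu in response to a release request.  The buffer contains the
 * device tree path of the cpu to remove: offline its threads, release the
 * connector back to firmware, and detach its device tree nodes.
 */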
static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
        struct device_node *dn;
        const u32 *drc_index;
        int rc;

        dn = of_find_node_by_path(buf);
        if (!dn)
                return -EINVAL;

        drc_index = of_get_property(dn, "ibm,my-drc-index", NULL);
        if (!drc_index) {
                of_node_put(dn);
                return -EINVAL;
        }

        rc = dlpar_offline_cpu(dn);
        if (rc) {
                of_node_put(dn);
                return -EINVAL;
        }

        rc = dlpar_release_drc(*drc_index);
        if (rc) {
                of_node_put(dn);
                return rc;
        }

        rc = dlpar_detach_node(dn);
        if (rc) {
                dlpar_acquire_drc(*drc_index);
                of_node_put(dn);
                return rc;
        }

        of_node_put(dn);

        return count;
}

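/* Hook the DLPAR cpu probe and release handlers into the platform code. */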
static int __init pseries_dlpar_init(void)
{
        ppc_md.cpu_probe = dlpar_cpu_probe;
        ppc_md.cpu_release = dlpar_cpu_release;

        return 0;
}
machine_device_initcall(pseries, pseries_dlpar_init);

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */