arch/s390/kernel/topology.c
/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/sysinfo.h>

#define PTF_HORIZONTAL  (0UL)
#define PTF_VERTICAL    (1UL)
#define PTF_CHECK       (2UL)

struct mask_info {
        struct mask_info *next;
        unsigned char id;
        cpumask_t mask;
};

static int topology_enabled = 1;
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];

static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];

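/*
 * Return the cpumask of the topology level represented by @info that
 * contains @cpu. If topology is disabled or the cpu is not found in any
 * mask, a mask containing only the cpu itself is returned.
 */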
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
        cpumask_t mask;

        cpumask_clear(&mask);
        if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
                cpumask_copy(&mask, cpumask_of(cpu));
                return mask;
        }
        while (info) {
                if (cpumask_test_cpu(cpu, &info->mask)) {
                        mask = info->mask;
                        break;
                }
                info = info->next;
        }
        if (cpumask_empty(&mask))
                cpumask_copy(&mask, cpumask_of(cpu));
        return mask;
}

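/*
 * Add the cpus described by a topology list cpu entry to the current
 * book and core masks and record their ids and polarization. With
 * @one_core_per_cpu set every cpu is treated as its own core; the
 * mask_info to use for the next cpu entry is returned.
 */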
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
                                          struct mask_info *book,
                                          struct mask_info *core,
                                          int one_core_per_cpu)
{
        unsigned int cpu;

        for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
                unsigned int rcpu;
                int lcpu;

                rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
                lcpu = smp_find_processor_id(rcpu);
                if (lcpu >= 0) {
                        cpumask_set_cpu(lcpu, &book->mask);
                        cpu_book_id[lcpu] = book->id;
                        cpumask_set_cpu(lcpu, &core->mask);
                        if (one_core_per_cpu) {
                                cpu_core_id[lcpu] = rcpu;
                                core = core->next;
                        } else {
                                cpu_core_id[lcpu] = core->id;
                        }
                        smp_cpu_set_polarization(lcpu, tl_cpu->pp);
                }
        }
        return core;
}

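/* Clear all cpumasks in the core and book mask_info lists. */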
static void clear_masks(void)
{
        struct mask_info *info;

        info = &core_info;
        while (info) {
                cpumask_clear(&info->mask);
                info = info->next;
        }
        info = &book_info;
        while (info) {
                cpumask_clear(&info->mask);
                info = info->next;
        }
}

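/*
 * Advance to the next topology list entry; cpu entries (nl == 0) and
 * container entries differ in size.
 */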
static union topology_entry *next_tle(union topology_entry *tle)
{
        if (!tle->nl)
                return (union topology_entry *)((struct topology_cpu *)tle + 1);
        return (union topology_entry *)((struct topology_container *)tle + 1);
}

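/*
 * Generic layout: nesting level 2 entries describe books, level 1
 * entries describe cores and level 0 entries list the cpus. Any other
 * nesting level invalidates all masks.
 */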
static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
{
        struct mask_info *core = &core_info;
        struct mask_info *book = &book_info;
        union topology_entry *tle, *end;

        tle = info->tle;
        end = (union topology_entry *)((unsigned long)info + info->length);
        while (tle < end) {
                switch (tle->nl) {
                case 2:
                        book = book->next;
                        book->id = tle->container.id;
                        break;
                case 1:
                        core = core->next;
                        core->id = tle->container.id;
                        break;
                case 0:
                        add_cpus_to_mask(&tle->cpu, book, core, 0);
                        break;
                default:
                        clear_masks();
                        return;
                }
                tle = next_tle(tle);
        }
}

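/*
 * z10 layout: nesting level 1 entries describe books and every cpu in a
 * level 0 entry is treated as a separate core.
 */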
static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
{
        struct mask_info *core = &core_info;
        struct mask_info *book = &book_info;
        union topology_entry *tle, *end;

        tle = info->tle;
        end = (union topology_entry *)((unsigned long)info + info->length);
        while (tle < end) {
                switch (tle->nl) {
                case 1:
                        book = book->next;
                        book->id = tle->container.id;
                        break;
                case 0:
                        core = add_cpus_to_mask(&tle->cpu, book, core, 1);
                        break;
                default:
                        clear_masks();
                        return;
                }
                tle = next_tle(tle);
        }
}

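/*
 * Rebuild the core and book masks from a SYSIB 15.1.x block. The z10
 * machine types (0x2097, 0x2098) use their own topology layout.
 */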
static void tl_to_cores(struct sysinfo_15_1_x *info)
{
        struct cpuid cpu_id;

        get_cpu_id(&cpu_id);
        spin_lock_irq(&topology_lock);
        clear_masks();
        switch (cpu_id.machine) {
        case 0x2097:
        case 0x2098:
                __tl_to_cores_z10(info);
                break;
        default:
                __tl_to_cores_generic(info);
        }
        spin_unlock_irq(&topology_lock);
}

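/* Without topology support all cpus are considered horizontally polarized. */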
static void topology_update_polarization_simple(void)
{
        int cpu;

        mutex_lock(&smp_cpu_state_mutex);
        for_each_possible_cpu(cpu)
                smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
        mutex_unlock(&smp_cpu_state_mutex);
}

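/*
 * Issue the perform-topology-function (PTF) instruction with function
 * code @fc and return its condition code.
 */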
static int ptf(unsigned long fc)
{
        int rc;

        asm volatile(
                "       .insn   rre,0xb9a20000,%1,%1\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (rc)
                : "d" (fc)  : "cc");
        return rc;
}

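/*
 * Switch between horizontal (fc == 0) and vertical (fc != 0) polarization
 * and mark the polarization of all cpus as unknown until the next
 * topology update.
 */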
int topology_set_cpu_management(int fc)
{
        int cpu, rc;

        if (!MACHINE_HAS_TOPOLOGY)
                return -EOPNOTSUPP;
        if (fc)
                rc = ptf(PTF_VERTICAL);
        else
                rc = ptf(PTF_HORIZONTAL);
        if (rc)
                return -EBUSY;
        for_each_possible_cpu(cpu)
                smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
        return rc;
}

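/* Recompute cpu_core_map and cpu_book_map for all possible cpus. */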
static void update_cpu_core_map(void)
{
        unsigned long flags;
        int cpu;

        spin_lock_irqsave(&topology_lock, flags);
        for_each_possible_cpu(cpu) {
                cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
                cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
        }
        spin_unlock_irqrestore(&topology_lock, flags);
}

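/*
 * Store the SYSIB 15.1.x block with the highest supported nesting level,
 * at most 3.
 */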
void store_topology(struct sysinfo_15_1_x *info)
{
        if (topology_max_mnest >= 3)
                stsi(info, 15, 1, 3);
        else
                stsi(info, 15, 1, 2);
}

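/*
 * Update the topology masks; the return value tells the scheduler
 * whether the scheduling domains need to be rebuilt.
 */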
int arch_update_cpu_topology(void)
{
        struct sysinfo_15_1_x *info = tl_info;
        struct device *dev;
        int cpu;

        if (!MACHINE_HAS_TOPOLOGY) {
                update_cpu_core_map();
                topology_update_polarization_simple();
                return 0;
        }
        store_topology(info);
        tl_to_cores(info);
        update_cpu_core_map();
        for_each_online_cpu(cpu) {
                dev = get_cpu_device(cpu);
                kobject_uevent(&dev->kobj, KOBJ_CHANGE);
        }
        return 1;
}

static void topology_work_fn(struct work_struct *work)
{
        rebuild_sched_domains();
}

void topology_schedule_update(void)
{
        schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
        if (ptf(PTF_CHECK))
                topology_schedule_update();
        set_topology_timer();
}

static struct timer_list topology_timer =
        TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

static atomic_t topology_poll = ATOMIC_INIT(0);

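/*
 * Re-arm the topology timer: poll every 100ms while a change is expected,
 * otherwise once per minute.
 */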
static void set_topology_timer(void)
{
        if (atomic_add_unless(&topology_poll, -1, 0))
                mod_timer(&topology_timer, jiffies + HZ / 10);
        else
                mod_timer(&topology_timer, jiffies + HZ * 60);
}

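/*
 * A topology change is expected; make the timer poll at the higher rate
 * for roughly the next 60 expirations.
 */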
void topology_expect_change(void)
{
        if (!MACHINE_HAS_TOPOLOGY)
                return;
        /* This is racy, but it doesn't matter since it is just a heuristic.
         * Worst case is that we poll in a higher frequency for a bit longer.
         */
        if (atomic_read(&topology_poll) > 60)
                return;
        atomic_add(60, &topology_poll);
        set_topology_timer();
}

static int __init early_parse_topology(char *p)
{
        if (strncmp(p, "off", 3))
                return 0;
        topology_enabled = 0;
        return 0;
}
early_param("topology", early_parse_topology);

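/*
 * Allocate enough mask_info structures for all containers of the topology
 * nesting level selected by @offset (1 for cores, 2 for books).
 */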
static void __init alloc_masks(struct sysinfo_15_1_x *info,
                               struct mask_info *mask, int offset)
{
        int i, nr_masks;

        nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
        for (i = 0; i < info->mnest - offset; i++)
                nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
        nr_masks = max(nr_masks, 1);
        for (i = 0; i < nr_masks; i++) {
                mask->next = alloc_bootmem(sizeof(struct mask_info));
                mask = mask->next;
        }
}

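/*
 * Early boot setup: allocate the SYSIB 15.1.x buffer, print the machine's
 * topology magnitudes and allocate the core and book mask lists.
 */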
void __init s390_init_cpu_topology(void)
{
        struct sysinfo_15_1_x *info;
        int i;

        if (!MACHINE_HAS_TOPOLOGY)
                return;
        tl_info = alloc_bootmem_pages(PAGE_SIZE);
        info = tl_info;
        store_topology(info);
        pr_info("The CPU configuration topology of the machine is:");
        for (i = 0; i < TOPOLOGY_NR_MAG; i++)
                printk(KERN_CONT " %d", info->mag[i]);
        printk(KERN_CONT " / %d\n", info->mnest);
        alloc_masks(info, &core_info, 1);
        alloc_masks(info, &book_info, 2);
}

static int cpu_management;

static ssize_t dispatching_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        count = sprintf(buf, "%d\n", cpu_management);
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}

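/*
 * Writing "1" to the dispatching attribute enables vertical cpu
 * polarization, writing "0" reverts to horizontal polarization.
 */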
static ssize_t dispatching_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        int val, rc;
        char delim;

        if (sscanf(buf, "%d %c", &val, &delim) != 1)
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
        rc = 0;
        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        if (cpu_management == val)
                goto out;
        rc = topology_set_cpu_management(val);
        if (rc)
                goto out;
        cpu_management = val;
        topology_expect_change();
out:
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
                         dispatching_store);

static ssize_t cpu_polarization_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        int cpu = dev->id;
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        switch (smp_cpu_get_polarization(cpu)) {
        case POLARIZATION_HRZ:
                count = sprintf(buf, "horizontal\n");
                break;
        case POLARIZATION_VL:
                count = sprintf(buf, "vertical:low\n");
                break;
        case POLARIZATION_VM:
                count = sprintf(buf, "vertical:medium\n");
                break;
        case POLARIZATION_VH:
                count = sprintf(buf, "vertical:high\n");
                break;
        default:
                count = sprintf(buf, "unknown\n");
                break;
        }
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
        &dev_attr_polarization.attr,
        NULL,
};

static struct attribute_group topology_cpu_attr_group = {
        .attrs = topology_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
        return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}

static int __init topology_init(void)
{
        if (!MACHINE_HAS_TOPOLOGY) {
                topology_update_polarization_simple();
                goto out;
        }
        set_topology_timer();
out:
        update_cpu_core_map();
        return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);