Merge tag 'iwlwifi-next-for-kalle-2014-12-30' of https://git.kernel.org/pub/scm/linux...
[cascardo/linux.git] / arch / mips / oprofile / op_model_mipsxx.c
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 2004, 05, 06 by Ralf Baechle
7  * Copyright (C) 2005 by MIPS Technologies, Inc.
8  */
9 #include <linux/cpumask.h>
10 #include <linux/oprofile.h>
11 #include <linux/interrupt.h>
12 #include <linux/smp.h>
13 #include <asm/irq_regs.h>
14 #include <asm/time.h>
15
16 #include "op_impl.h"
17
/*
 * Bit fields of the CP0 performance counter control (PerfCtl) register.
 * Bits 0-3 select which privilege levels/states are counted, bit 4
 * enables the overflow interrupt, bits 5.. select the event.
 */
#define M_PERFCTL_EXL			(1UL      <<  0)
#define M_PERFCTL_KERNEL		(1UL      <<  1)
#define M_PERFCTL_SUPERVISOR		(1UL      <<  2)
#define M_PERFCTL_USER			(1UL      <<  3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1UL      <<  4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff)  << 5)
/* MT ASE: bind a counter to one VPE/TC and select the filter mode. */
#define M_PERFCTL_VPEID(vpe)		((vpe)    << 16)
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#define    M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define    M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define    M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid)   << 22)
#define M_PERFCTL_WIDE			(1UL      << 30)
/* Set when another counter/control pair follows this one. */
#define M_PERFCTL_MORE			(1UL      << 31)

/* Counters are 32 bit; bit 31 set is treated as overflow. */
#define M_COUNTER_OVERFLOW		(1UL      << 31)

/* Netlogic XLR specific, count events in all threads in a core */
#define M_PERFCTL_COUNT_ALL_THREADS	(1UL      << 13)
37
/* Previous perf_irq handler, restored by mipsxx_exit(). */
static int (*save_perf_irq)(void);
/* IRQ number used for counter overflow; < 0 when no IRQ was set up. */
static int perfcount_irq;

/*
 * XLR has only one set of counters per core. Designate the
 * first hardware thread in the core for setup and init.
 * Skip CPUs with non-zero hardware thread id (4 hwt per core)
 */
#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
#define oprofile_skip_cpu(c)	((cpu_logical_map(c) & 0x3) != 0)
#else
#define oprofile_skip_cpu(c)	0
#endif
51
#ifdef CONFIG_MIPS_MT_SMP
/* Non-zero when each TC has its own counters (probed from Config7 in init). */
static int cpu_has_mipsmt_pertccounters;
/* Control bits that bind a counter to the VPE the current CPU runs on. */
#define WHAT		(M_TC_EN_VPE | \
			 M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
/* Counter set to use: 0 with per-TC counters, otherwise our VPE's set. */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_data[smp_processor_id()].vpe_id)

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE.  There is no reasonable interface atm to obtain the
 * number of VPEs used by Linux and in the 34K this number is fixed to two
 * anyways so we hardcode a few things here for the moment.  The way it's
 * done here will ensure that oprofile VSMP kernel will run right on a lesser
 * core like a 24K also or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

#else

/* Without MT SMP there is a single counter set, always "VPE" 0. */
#define WHAT		0
#define vpe_id()	0

static inline unsigned int vpe_shift(void)
{
	return 0;
}

#endif
86
/* Convert the per-core counter total into the count seen by one VPE. */
static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	unsigned int shift = vpe_shift();

	return counters >> shift;
}
91
/* Convert a per-VPE counter count back into the per-core total. */
static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
{
	unsigned int shift = vpe_shift();

	return counters << shift;
}
96
/*
 * Generate r_c0_<r><n>() / w_c0_<r><n>() accessors that read or write
 * either hardware counter set <n> (when vpe_id() is 0) or set <np>
 * (when vpe_id() is 1).  With per-TC counters or without MT, vpe_id()
 * is always 0 and set <n> is used directly.  Any other VPE id is a
 * bug.
 */
#define __define_perf_accessors(r, n, np)				\
									\
static inline unsigned int r_c0_ ## r ## n(void)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		return read_c0_ ## r ## n();				\
	case 1:								\
		return read_c0_ ## r ## np();				\
	default:							\
		BUG();							\
	}								\
	return 0;							\
}									\
									\
static inline void w_c0_ ## r ## n(unsigned int value)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		write_c0_ ## r ## n(value);				\
		return;							\
	case 1:								\
		write_c0_ ## r ## np(value);				\
		return;							\
	default:							\
		BUG();							\
	}								\
	return;								\
}									\

/* Counter n on VPE 0 is backed by set n+2 (mod 4) on VPE 1. */
__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
140
/* Forward declaration; the structure itself is filled in at file end. */
struct op_mips_model op_model_mipsxx_ops;

/* Shadow copy of the values programmed into the hardware counters. */
static struct mipsxx_register_config {
	unsigned int control[4];	/* PerfCtl word per counter */
	unsigned int counter[4];	/* start value: 0x80000000 - period */
} reg;
147
148 /* Compute all of the registers in preparation for enabling profiling.  */
149
150 static void mipsxx_reg_setup(struct op_counter_config *ctr)
151 {
152         unsigned int counters = op_model_mipsxx_ops.num_counters;
153         int i;
154
155         /* Compute the performance counter control word.  */
156         for (i = 0; i < counters; i++) {
157                 reg.control[i] = 0;
158                 reg.counter[i] = 0;
159
160                 if (!ctr[i].enabled)
161                         continue;
162
163                 reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
164                                  M_PERFCTL_INTERRUPT_ENABLE;
165                 if (ctr[i].kernel)
166                         reg.control[i] |= M_PERFCTL_KERNEL;
167                 if (ctr[i].user)
168                         reg.control[i] |= M_PERFCTL_USER;
169                 if (ctr[i].exl)
170                         reg.control[i] |= M_PERFCTL_EXL;
171                 if (boot_cpu_type() == CPU_XLR)
172                         reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
173                 reg.counter[i] = 0x80000000 - ctr[i].count;
174         }
175 }
176
177 /* Program all of the registers in preparation for enabling profiling.  */
178
179 static void mipsxx_cpu_setup(void *args)
180 {
181         unsigned int counters = op_model_mipsxx_ops.num_counters;
182
183         if (oprofile_skip_cpu(smp_processor_id()))
184                 return;
185
186         switch (counters) {
187         case 4:
188                 w_c0_perfctrl3(0);
189                 w_c0_perfcntr3(reg.counter[3]);
190         case 3:
191                 w_c0_perfctrl2(0);
192                 w_c0_perfcntr2(reg.counter[2]);
193         case 2:
194                 w_c0_perfctrl1(0);
195                 w_c0_perfcntr1(reg.counter[1]);
196         case 1:
197                 w_c0_perfctrl0(0);
198                 w_c0_perfcntr0(reg.counter[0]);
199         }
200 }
201
202 /* Start all counters on current CPU */
203 static void mipsxx_cpu_start(void *args)
204 {
205         unsigned int counters = op_model_mipsxx_ops.num_counters;
206
207         if (oprofile_skip_cpu(smp_processor_id()))
208                 return;
209
210         switch (counters) {
211         case 4:
212                 w_c0_perfctrl3(WHAT | reg.control[3]);
213         case 3:
214                 w_c0_perfctrl2(WHAT | reg.control[2]);
215         case 2:
216                 w_c0_perfctrl1(WHAT | reg.control[1]);
217         case 1:
218                 w_c0_perfctrl0(WHAT | reg.control[0]);
219         }
220 }
221
222 /* Stop all counters on current CPU */
223 static void mipsxx_cpu_stop(void *args)
224 {
225         unsigned int counters = op_model_mipsxx_ops.num_counters;
226
227         if (oprofile_skip_cpu(smp_processor_id()))
228                 return;
229
230         switch (counters) {
231         case 4:
232                 w_c0_perfctrl3(0);
233         case 3:
234                 w_c0_perfctrl2(0);
235         case 2:
236                 w_c0_perfctrl1(0);
237         case 1:
238                 w_c0_perfctrl0(0);
239         }
240 }
241
/*
 * Counter overflow handler, used both as the perf_irq hook and from the
 * IRQ entry point.  Returns IRQ_HANDLED if at least one overflowed
 * counter was sampled, IRQ_NONE otherwise.
 */
static int mipsxx_perfcount_handler(void)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;
	unsigned int control;
	unsigned int counter;
	int handled = IRQ_NONE;

	/*
	 * On R2 cores a pending performance counter interrupt is flagged
	 * in Cause bit 26 (PCI); if it is clear the interrupt is not ours.
	 */
	if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
		return handled;

	/*
	 * Cases deliberately fall through so that every implemented
	 * counter, from the highest down to 0, is checked.  An
	 * overflowed counter is sampled and reloaded with its start
	 * value from reg.counter[].
	 */
	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		control = r_c0_perfctrl ## n();				\
		counter = r_c0_perfcntr ## n();				\
		if ((control & M_PERFCTL_INTERRUPT_ENABLE) &&		\
		    (counter & M_COUNTER_OVERFLOW)) {			\
			oprofile_add_sample(get_irq_regs(), n);		\
			w_c0_perfcntr ## n(reg.counter[n]);		\
			handled = IRQ_HANDLED;				\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	return handled;
}
271
272 #define M_CONFIG1_PC    (1 << 4)
273
274 static inline int __n_counters(void)
275 {
276         if (!(read_c0_config1() & M_CONFIG1_PC))
277                 return 0;
278         if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
279                 return 1;
280         if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
281                 return 2;
282         if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
283                 return 3;
284
285         return 4;
286 }
287
288 static inline int n_counters(void)
289 {
290         int counters;
291
292         switch (current_cpu_type()) {
293         case CPU_R10000:
294                 counters = 2;
295                 break;
296
297         case CPU_R12000:
298         case CPU_R14000:
299                 counters = 4;
300                 break;
301
302         default:
303                 counters = __n_counters();
304         }
305
306         return counters;
307 }
308
/*
 * Clear the control and count registers of every implemented counter.
 * arg carries the counter count, smuggled through the void pointer.
 */
static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	if (counters >= 4) {
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(0);
	}
	if (counters >= 3) {
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(0);
	}
	if (counters >= 2) {
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(0);
	}
	if (counters >= 1) {
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(0);
	}
}
327
328 static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
329 {
330         return mipsxx_perfcount_handler();
331 }
332
/*
 * Probe the counters, pick the oprofile cpu_type string, install the
 * overflow handler and (if available) request a dedicated IRQ.
 * Returns 0 on success, -ENODEV for unsupported CPUs.
 */
static int __init mipsxx_init(void)
{
	int counters;

	counters = n_counters();
	if (counters == 0) {
		printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_MT_SMP
	/* Config7 bit 19: each TC has private counters. */
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif
	/* Start from a clean slate on every CPU. */
	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	op_model_mipsxx_ops.num_counters = counters;
	/* Map the CPU type to the oprofile events-description name. */
	switch (current_cpu_type()) {
	case CPU_M14KC:
		op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
		break;

	case CPU_M14KEC:
		op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
		break;

	case CPU_20KC:
		op_model_mipsxx_ops.cpu_type = "mips/20K";
		break;

	case CPU_24K:
		op_model_mipsxx_ops.cpu_type = "mips/24K";
		break;

	case CPU_25KF:
		op_model_mipsxx_ops.cpu_type = "mips/25K";
		break;

	case CPU_1004K:
	case CPU_34K:
		op_model_mipsxx_ops.cpu_type = "mips/34K";
		break;

	case CPU_1074K:
	case CPU_74K:
		op_model_mipsxx_ops.cpu_type = "mips/74K";
		break;

	case CPU_INTERAPTIV:
		op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
		break;

	case CPU_PROAPTIV:
		op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
		break;

	case CPU_P5600:
		op_model_mipsxx_ops.cpu_type = "mips/P5600";
		break;

	case CPU_M5150:
		op_model_mipsxx_ops.cpu_type = "mips/M5150";
		break;

	case CPU_5KC:
		op_model_mipsxx_ops.cpu_type = "mips/5K";
		break;

	case CPU_R10000:
		/* Revision 0x20 and later are the r10000 v2.x parts. */
		if ((current_cpu_data.processor_id & 0xff) == 0x20)
			op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
		else
			op_model_mipsxx_ops.cpu_type = "mips/r10000";
		break;

	case CPU_R12000:
	case CPU_R14000:
		op_model_mipsxx_ops.cpu_type = "mips/r12000";
		break;

	case CPU_SB1:
	case CPU_SB1A:
		op_model_mipsxx_ops.cpu_type = "mips/sb1";
		break;

	case CPU_LOONGSON1:
		op_model_mipsxx_ops.cpu_type = "mips/loongson1";
		break;

	case CPU_XLR:
		op_model_mipsxx_ops.cpu_type = "mips/xlr";
		break;

	default:
		printk(KERN_ERR "Profiling unsupported for this CPU\n");

		return -ENODEV;
	}

	/* Hook the counter overflow path; old handler restored on exit. */
	save_perf_irq = perf_irq;
	perf_irq = mipsxx_perfcount_handler;

	/*
	 * Prefer a platform-provided IRQ; otherwise use the CP0
	 * perfcount IRQ only if it isn't shared with the timer.
	 */
	if (get_c0_perfcount_int)
		perfcount_irq = get_c0_perfcount_int();
	else if ((cp0_perfcount_irq >= 0) &&
		 (cp0_compare_irq != cp0_perfcount_irq))
		perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		perfcount_irq = -1;

	/* save_perf_irq doubles as the dev_id cookie, matched in free_irq. */
	if (perfcount_irq >= 0)
		return request_irq(perfcount_irq, mipsxx_perfcount_int,
			0, "Perfcounter", save_perf_irq);

	return 0;
}
450
451 static void mipsxx_exit(void)
452 {
453         int counters = op_model_mipsxx_ops.num_counters;
454
455         if (perfcount_irq >= 0)
456                 free_irq(perfcount_irq, save_perf_irq);
457
458         counters = counters_per_cpu_to_total(counters);
459         on_each_cpu(reset_counters, (void *)(long)counters, 1);
460
461         perf_irq = save_perf_irq;
462 }
463
/*
 * Operations exported to the generic MIPS oprofile layer.
 * num_counters and cpu_type are filled in by mipsxx_init().
 */
struct op_mips_model op_model_mipsxx_ops = {
	.reg_setup	= mipsxx_reg_setup,
	.cpu_setup	= mipsxx_cpu_setup,
	.init		= mipsxx_init,
	.exit		= mipsxx_exit,
	.cpu_start	= mipsxx_cpu_start,
	.cpu_stop	= mipsxx_cpu_stop,
};