arch/mips/oprofile/op_model_mipsxx.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004, 05, 06 by Ralf Baechle
 * Copyright (C) 2005 by MIPS Technologies, Inc.
 */
#include <linux/cpumask.h>
#include <linux/oprofile.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <asm/irq_regs.h>

#include "op_impl.h"

#define M_PERFCTL_EXL                   (1UL      <<  0)
#define M_PERFCTL_KERNEL                (1UL      <<  1)
#define M_PERFCTL_SUPERVISOR            (1UL      <<  2)
#define M_PERFCTL_USER                  (1UL      <<  3)
#define M_PERFCTL_INTERRUPT_ENABLE      (1UL      <<  4)
#define M_PERFCTL_EVENT(event)          (((event) & 0x3ff)  << 5)
#define M_PERFCTL_VPEID(vpe)            ((vpe)    << 16)
#define M_PERFCTL_MT_EN(filter)         ((filter) << 20)
#define    M_TC_EN_ALL                  M_PERFCTL_MT_EN(0)
#define    M_TC_EN_VPE                  M_PERFCTL_MT_EN(1)
#define    M_TC_EN_TC                   M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)            ((tcid)   << 22)
#define M_PERFCTL_WIDE                  (1UL      << 30)
#define M_PERFCTL_MORE                  (1UL      << 31)

#define M_COUNTER_OVERFLOW              (1UL      << 31)

static int (*save_perf_irq)(void);

#ifdef CONFIG_MIPS_MT_SMP
static int cpu_has_mipsmt_pertccounters;
#define WHAT            (M_TC_EN_VPE | \
                         M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
#define vpe_id()        (cpu_has_mipsmt_pertccounters ? \
                        0 : cpu_data[smp_processor_id()].vpe_id)

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE.  There is currently no reasonable interface to obtain
 * the number of VPEs used by Linux, and on the 34K this number is fixed to
 * two anyway, so we hardcode a few things here for the moment.  The way it
 * is done here ensures that an oprofile VSMP kernel will also run correctly
 * on a lesser core such as a 24K, or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
        if (num_possible_cpus() > 1)
                return 1;

        return 0;
}

#else

#define WHAT            0
#define vpe_id()        0

static inline unsigned int vpe_shift(void)
{
        return 0;
}

#endif

static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
{
        return counters >> vpe_shift();
}

static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
{
        return counters << vpe_shift();
}
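
/*
 * For illustration (based on the helpers above): on a 34K running an MT
 * VSMP kernel with two VPEs, a core with 4 performance counters exposes
 * counters_total_to_per_cpu(4) == 2 counters to each VPE; in a single-CPU
 * configuration (e.g. maxcpus=1) the shift is 0 and all counters stay with
 * the one VPE.
 */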

#define __define_perf_accessors(r, n, np)                               \
                                                                        \
static inline unsigned int r_c0_ ## r ## n(void)                        \
{                                                                       \
        unsigned int cpu = vpe_id();                                    \
                                                                        \
        switch (cpu) {                                                  \
        case 0:                                                         \
                return read_c0_ ## r ## n();                            \
        case 1:                                                         \
                return read_c0_ ## r ## np();                           \
        default:                                                        \
                BUG();                                                  \
        }                                                               \
        return 0;                                                       \
}                                                                       \
                                                                        \
static inline void w_c0_ ## r ## n(unsigned int value)                  \
{                                                                       \
        unsigned int cpu = vpe_id();                                    \
                                                                        \
        switch (cpu) {                                                  \
        case 0:                                                         \
                write_c0_ ## r ## n(value);                             \
                return;                                                 \
        case 1:                                                         \
                write_c0_ ## r ## np(value);                            \
                return;                                                 \
        default:                                                        \
                BUG();                                                  \
        }                                                               \
        return;                                                         \
}                                                                       \

__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
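
/*
 * For illustration, __define_perf_accessors(perfcntr, 0, 2) above expands
 * roughly to:
 *
 *      static inline unsigned int r_c0_perfcntr0(void)
 *      {
 *              switch (vpe_id()) {
 *              case 0: return read_c0_perfcntr0();
 *              case 1: return read_c0_perfcntr2();
 *              default: BUG();
 *              }
 *              return 0;
 *      }
 *
 * i.e. VPE 0 accesses counter n directly while VPE 1 is swapped onto the
 * other half of the register set (0 <-> 2, 1 <-> 3), so each VPE programs
 * its own counters without stepping on the other VPE's set.
 */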

struct op_mips_model op_model_mipsxx_ops;

static struct mipsxx_register_config {
        unsigned int control[4];
        unsigned int counter[4];
} reg;

/* Compute all of the registers in preparation for enabling profiling.  */

static void mipsxx_reg_setup(struct op_counter_config *ctr)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;
        int i;

        /* Compute the performance counter control word.  */
        for (i = 0; i < counters; i++) {
                reg.control[i] = 0;
                reg.counter[i] = 0;

                if (!ctr[i].enabled)
                        continue;

                reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
                                 M_PERFCTL_INTERRUPT_ENABLE;
                if (ctr[i].kernel)
                        reg.control[i] |= M_PERFCTL_KERNEL;
                if (ctr[i].user)
                        reg.control[i] |= M_PERFCTL_USER;
                if (ctr[i].exl)
                        reg.control[i] |= M_PERFCTL_EXL;
                reg.counter[i] = 0x80000000 - ctr[i].count;
        }
}
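
/*
 * Note: each enabled counter is preset to 0x80000000 - count, so after
 * 'count' events it counts up into bit 31 (M_COUNTER_OVERFLOW) and raises
 * the performance counter interrupt handled below.
 */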

/* Program all of the registers in preparation for enabling profiling.  */

static void mipsxx_cpu_setup(void *args)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;

        switch (counters) {
        case 4:
                w_c0_perfctrl3(0);
                w_c0_perfcntr3(reg.counter[3]);
        case 3:
                w_c0_perfctrl2(0);
                w_c0_perfcntr2(reg.counter[2]);
        case 2:
                w_c0_perfctrl1(0);
                w_c0_perfcntr1(reg.counter[1]);
        case 1:
                w_c0_perfctrl0(0);
                w_c0_perfcntr0(reg.counter[0]);
        }
}
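
/*
 * The switch statements in mipsxx_cpu_setup(), mipsxx_cpu_start(),
 * mipsxx_cpu_stop() and reset_counters() further down intentionally fall
 * through from the highest case downwards, so a core with N counters has
 * counters N-1 .. 0 programmed in a single pass.
 */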

/* Start all counters on current CPU */
static void mipsxx_cpu_start(void *args)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;

        switch (counters) {
        case 4:
                w_c0_perfctrl3(WHAT | reg.control[3]);
        case 3:
                w_c0_perfctrl2(WHAT | reg.control[2]);
        case 2:
                w_c0_perfctrl1(WHAT | reg.control[1]);
        case 1:
                w_c0_perfctrl0(WHAT | reg.control[0]);
        }
}

/* Stop all counters on current CPU */
static void mipsxx_cpu_stop(void *args)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;

        switch (counters) {
        case 4:
                w_c0_perfctrl3(0);
        case 3:
                w_c0_perfctrl2(0);
        case 2:
                w_c0_perfctrl1(0);
        case 1:
                w_c0_perfctrl0(0);
        }
}

static int mipsxx_perfcount_handler(void)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;
        unsigned int control;
        unsigned int counter;
        int handled = IRQ_NONE;

        if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
                return handled;

        switch (counters) {
#define HANDLE_COUNTER(n)                                               \
        case n + 1:                                                     \
                control = r_c0_perfctrl ## n();                         \
                counter = r_c0_perfcntr ## n();                         \
                if ((control & M_PERFCTL_INTERRUPT_ENABLE) &&           \
                    (counter & M_COUNTER_OVERFLOW)) {                   \
                        oprofile_add_sample(get_irq_regs(), n);         \
                        w_c0_perfcntr ## n(reg.counter[n]);             \
                        handled = IRQ_HANDLED;                          \
                }
        HANDLE_COUNTER(3)
        HANDLE_COUNTER(2)
        HANDLE_COUNTER(1)
        HANDLE_COUNTER(0)
        }

        return handled;
}
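
/*
 * On MIPS32/64 R2 cores the handler first checks the Cause.PCI bit
 * (bit 26) so it can bail out quickly when no performance counter
 * interrupt is actually pending.  The HANDLE_COUNTER() cases then fall
 * through from counter 3 down to counter 0, restarting each overflowed
 * counter from its preset value after logging a sample.
 */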

#define M_CONFIG1_PC    (1 << 4)

static inline int __n_counters(void)
{
        if (!(read_c0_config1() & M_CONFIG1_PC))
                return 0;
        if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
                return 1;
        if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
                return 2;
        if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
                return 3;

        return 4;
}
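
/*
 * __n_counters() follows the architected discovery scheme: Config1.PC
 * (bit 4) says whether performance counters are implemented at all, and
 * the "more" bit in each control register (M_PERFCTL_MORE, bit 31) says
 * whether a further counter/control pair follows.
 */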

static inline int n_counters(void)
{
        int counters;

        switch (current_cpu_type()) {
        case CPU_R10000:
                counters = 2;
                break;

        case CPU_R12000:
        case CPU_R14000:
                counters = 4;
                break;

        default:
                counters = __n_counters();
        }

        return counters;
}

static void reset_counters(void *arg)
{
        int counters = (int)(long)arg;
        switch (counters) {
        case 4:
                w_c0_perfctrl3(0);
                w_c0_perfcntr3(0);
        case 3:
                w_c0_perfctrl2(0);
                w_c0_perfcntr2(0);
        case 2:
                w_c0_perfctrl1(0);
                w_c0_perfcntr1(0);
        case 1:
                w_c0_perfctrl0(0);
                w_c0_perfcntr0(0);
        }
}

static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
{
        return mipsxx_perfcount_handler();
}

static int __init mipsxx_init(void)
{
        int counters;

        counters = n_counters();
        if (counters == 0) {
                printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
                return -ENODEV;
        }

#ifdef CONFIG_MIPS_MT_SMP
        cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
        if (!cpu_has_mipsmt_pertccounters)
                counters = counters_total_to_per_cpu(counters);
#endif
        on_each_cpu(reset_counters, (void *)(long)counters, 1);

        op_model_mipsxx_ops.num_counters = counters;
        switch (current_cpu_type()) {
        case CPU_20KC:
                op_model_mipsxx_ops.cpu_type = "mips/20K";
                break;

        case CPU_24K:
                op_model_mipsxx_ops.cpu_type = "mips/24K";
                break;

        case CPU_25KF:
                op_model_mipsxx_ops.cpu_type = "mips/25K";
                break;

        case CPU_1004K:
#if 0
                /* FIXME: report as 34K for now */
                op_model_mipsxx_ops.cpu_type = "mips/1004K";
                break;
#endif

        case CPU_34K:
                op_model_mipsxx_ops.cpu_type = "mips/34K";
                break;

        case CPU_74K:
                op_model_mipsxx_ops.cpu_type = "mips/74K";
                break;

        case CPU_5KC:
                op_model_mipsxx_ops.cpu_type = "mips/5K";
                break;

        case CPU_R10000:
                if ((current_cpu_data.processor_id & 0xff) == 0x20)
                        op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
                else
                        op_model_mipsxx_ops.cpu_type = "mips/r10000";
                break;

        case CPU_R12000:
        case CPU_R14000:
                op_model_mipsxx_ops.cpu_type = "mips/r12000";
                break;

        case CPU_SB1:
        case CPU_SB1A:
                op_model_mipsxx_ops.cpu_type = "mips/sb1";
                break;

        default:
                printk(KERN_ERR "Profiling unsupported for this CPU\n");

                return -ENODEV;
        }

        save_perf_irq = perf_irq;
        perf_irq = mipsxx_perfcount_handler;

        if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
                return request_irq(cp0_perfcount_irq, mipsxx_perfcount_int,
                        0, "Perfcounter", save_perf_irq);

        return 0;
}

static void mipsxx_exit(void)
{
        int counters = op_model_mipsxx_ops.num_counters;

        if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
                free_irq(cp0_perfcount_irq, save_perf_irq);

        counters = counters_per_cpu_to_total(counters);
        on_each_cpu(reset_counters, (void *)(long)counters, 1);

        perf_irq = save_perf_irq;
}

struct op_mips_model op_model_mipsxx_ops = {
        .reg_setup      = mipsxx_reg_setup,
        .cpu_setup      = mipsxx_cpu_setup,
        .init           = mipsxx_init,
        .exit           = mipsxx_exit,
        .cpu_start      = mipsxx_cpu_start,
        .cpu_stop       = mipsxx_cpu_stop,
};