/*
 *      linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)      printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  */
static struct {
        unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
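
/* Each entry above sits in its own cache line, so a sender setting an
   IPI bit with set_bit() does not bounce the cache line holding another
   CPU's pending-IPI word.  */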

enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};
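
/* The values above are bit numbers within ipi_data[cpu].bits;
   handle_ipi() drains that word with xchg() and dispatches on each set
   bit in turn.  */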

/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __devinitdata = 0;

int smp_num_probed;             /* Internal processor count */
int smp_num_cpus = 1;           /* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
        cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
        cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
        cpu_data[cpuid].need_new_asn = 0;
        cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
        cpu_data[cpuid].prof_counter = 1;
        cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
        unsigned long stop = jiffies + 10*HZ;

        while (time_before(jiffies, stop)) {
                if (!smp_secondary_alive)
                        return;
                barrier();
        }

        printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
        for (;;)
                barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __cpuinit
smp_callin(void)
{
        int cpuid = hard_smp_processor_id();

        if (cpu_online(cpuid)) {
                printk("??, cpu 0x%x already present??\n", cpuid);
                BUG();
        }
        set_cpu_online(cpuid, true);

        /* Turn on machine checks.  */
        wrmces(7);

        /* Set trap vectors.  */
        trap_init();

        /* Set interrupt vector.  */
        wrent(entInt, 0);

        /* Get our local ticker going. */
        smp_setup_percpu_timer(cpuid);

        /* Call platform-specific callin, if specified.  */
        if (alpha_mv.smp_callin)
                alpha_mv.smp_callin();

        /* All kernel threads share the same mm context.  */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        /* Inform the notifiers about the new cpu.  */
        notify_cpu_starting(cpuid);

        /* Must have completely accurate bogos.  */
        local_irq_enable();

        /* Wait for the boot CPU to stop, with irqs enabled, before
           running calibrate_delay.  */
        wait_boot_cpu_to_stop(cpuid);
        mb();
        calibrate_delay();

        smp_store_cpu_info(cpuid);
        /* Allow the master to continue only after we've written
           loops_per_jiffy.  */
        wmb();
        smp_secondary_alive = 1;

        DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
              cpuid, current, current->active_mm));

        /* Do nothing.  */
        cpu_idle();
}
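
/* The bring-up handshake, for reference: smp_boot_one_cpu() sets
   smp_secondary_alive to -1 before asking the console to start us,
   drops it to 0 once the console ACKs, and we raise it to 1 above only
   after our loops_per_jiffy is valid.  */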

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __devinit
wait_for_txrdy (unsigned long cpumask)
{
        unsigned long timeout;

        if (!(hwrpb->txrdy & cpumask))
                return 0;

        timeout = jiffies + 10*HZ;
        while (time_before(jiffies, timeout)) {
                if (!(hwrpb->txrdy & cpumask))
                        return 0;
                udelay(10);
                barrier();
        }

        return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __cpuinit
send_secondary_console_msg(char *str, int cpuid)
{
        struct percpu_struct *cpu;
        register char *cp1, *cp2;
        unsigned long cpumask;
        size_t len;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);

        cpumask = (1UL << cpuid);
        if (wait_for_txrdy(cpumask))
                goto timeout;

        cp2 = str;
        len = strlen(cp2);
        *(unsigned int *)&cpu->ipc_buffer[0] = len;
        cp1 = (char *) &cpu->ipc_buffer[1];
        memcpy(cp1, cp2, len);

        /* atomic test and set */
        wmb();
        set_bit(cpuid, &hwrpb->rxrdy);

        if (wait_for_txrdy(cpumask))
                goto timeout;
        return;

 timeout:
        printk("Processor %x not ready\n", cpuid);
}
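
/* The exchange above treats the per-CPU HWRPB slot as a mailbox: the
   message length goes in the low word of ipc_buffer[0], the text in
   the bytes that follow, and setting this cpu's bit in hwrpb->rxrdy
   signals the console firmware to consume it.  */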

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
        int mycpu, i, cnt;
        unsigned long txrdy = hwrpb->txrdy;
        char *cp1, *cp2, buf[80];
        struct percpu_struct *cpu;

        DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

        mycpu = hard_smp_processor_id();

        for (i = 0; i < NR_CPUS; i++) {
                if (!(txrdy & (1UL << i)))
                        continue;

                DBGS(("recv_secondary_console_msg: "
                      "TXRDY contains CPU %d.\n", i));

                cpu = (struct percpu_struct *)
                  ((char*)hwrpb
                   + hwrpb->processor_offset
                   + i * hwrpb->processor_size);

                DBGS(("recv_secondary_console_msg: on %d from %d"
                      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
                      mycpu, i, cpu->halt_reason, cpu->flags));

                cnt = cpu->ipc_buffer[0] >> 32;
                if (cnt <= 0 || cnt >= 80)
                        strcpy(buf, "<<< BOGUS MSG >>>");
                else {
                        cp1 = (char *) &cpu->ipc_buffer[11];
                        cp2 = buf;
                        strcpy(cp2, cp1);

                        while ((cp2 = strchr(cp2, '\r')) != 0) {
                                *cp2 = ' ';
                                if (cp2[1] == '\n')
                                        cp2[1] = ' ';
                        }
                }

                DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
                      "message is '%s'\n", mycpu, buf));
        }

        hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __cpuinit
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
        struct percpu_struct *cpu;
        struct pcb_struct *hwpcb, *ipcb;
        unsigned long timeout;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);
        hwpcb = (struct pcb_struct *) cpu->hwpcb;
        ipcb = &task_thread_info(idle)->pcb;

        /* Initialize the CPU's HWPCB to something just good enough for
           us to get started.  Immediately after starting, we'll swpctx
           to the target idle task's pcb.  Reuse the stack in the mean
           time.  Precalculate the target PCBB.  */
        hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
        hwpcb->usp = 0;
        hwpcb->ptbr = ipcb->ptbr;
        hwpcb->pcc = 0;
        hwpcb->asn = 0;
        hwpcb->unique = virt_to_phys(ipcb);
        hwpcb->flags = ipcb->flags;
        hwpcb->res1 = hwpcb->res2 = 0;

#if 0
        DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
              hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
        DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
              cpuid, idle->state, ipcb->flags));

        /* Setup HWRPB fields that SRM uses to activate secondary CPU */
        hwrpb->CPU_restart = __smp_callin;
        hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

        /* Recalculate and update the HWRPB checksum */
        hwrpb_update_checksum(hwrpb);

        /*
         * Send a "start" command to the specified processor.
         */

        /* SRM III 3.4.1.3 */
        cpu->flags |= 0x22;     /* turn on Context Valid and Restart Capable */
        cpu->flags &= ~1;       /* turn off Bootstrap In Progress */
        wmb();

        send_secondary_console_msg("START\r\n", cpuid);

        /* Wait 10 seconds for an ACK from the console.  */
        timeout = jiffies + 10*HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu->flags & 1)
                        goto started;
                udelay(10);
                barrier();
        }
        printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
        return -1;

 started:
        DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
        return 0;
}
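
/* The SRM handshake in brief: point the HWRPB restart vector at
   __smp_callin, fix up the checksum, mark the slot restart-capable,
   then ask the secondary's console to "START".  The console is
   expected to ack by setting the Bootstrap In Progress bit that the
   loop above polls for.  */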

/*
 * Bring one cpu online.
 */
static int __cpuinit
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
        unsigned long timeout;

        /* Signal the secondary to wait a moment.  */
        smp_secondary_alive = -1;

        /* Whirrr, whirrr, whirrrrrrrrr... */
        if (secondary_cpu_start(cpuid, idle))
                return -1;

        /* Notify the secondary CPU it can run calibrate_delay.  */
        mb();
        smp_secondary_alive = 0;

        /* We've been acked by the console; wait one second for
           the task to start up for real.  */
        timeout = jiffies + 1*HZ;
        while (time_before(jiffies, timeout)) {
                if (smp_secondary_alive == 1)
                        goto alive;
                udelay(10);
                barrier();
        }

        /* We failed to boot the CPU.  */

        printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
        return -1;

 alive:
        /* Another "Red Snapper". */
        return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
        struct percpu_struct *cpubase, *cpu;
        unsigned long i;

        if (boot_cpuid != 0) {
                printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
                       boot_cpuid);
        }

        if (hwrpb->nr_processors > 1) {
                int boot_cpu_palrev;

                DBGS(("setup_smp: nr_processors %ld\n",
                      hwrpb->nr_processors));

                cpubase = (struct percpu_struct *)
                        ((char*)hwrpb + hwrpb->processor_offset);
                boot_cpu_palrev = cpubase->pal_revision;

                for (i = 0; i < hwrpb->nr_processors; i++) {
                        cpu = (struct percpu_struct *)
                                ((char *)cpubase + i*hwrpb->processor_size);
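                        /* 0x1cc is presumably the SRM per-CPU flags
                           test: processor available, processor
                           present, and PALcode valid/memory-valid/
                           loaded (bits 2, 3, 6, 7 and 8).  */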
                        if ((cpu->flags & 0x1cc) == 0x1cc) {
                                smp_num_probed++;
                                set_cpu_possible(i, true);
                                set_cpu_present(i, true);
                                cpu->pal_revision = boot_cpu_palrev;
                        }

                        DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
                              i, cpu->flags, cpu->type));
                        DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
                              i, cpu->pal_revision));
                }
        } else {
                smp_num_probed = 1;
        }

        printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
               smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
        /* Take care of some initial bookkeeping.  */
        memset(ipi_data, 0, sizeof(ipi_data));

        current_thread_info()->cpu = boot_cpuid;

        smp_store_cpu_info(boot_cpuid);
        smp_setup_percpu_timer(boot_cpuid);

        /* Nothing to do on a UP box, or when told not to.  */
        if (smp_num_probed == 1 || max_cpus == 0) {
                init_cpu_possible(cpumask_of(boot_cpuid));
                init_cpu_present(cpumask_of(boot_cpuid));
                printk(KERN_INFO "SMP mode deactivated.\n");
                return;
        }

        printk(KERN_INFO "SMP starting up secondaries.\n");

        smp_num_cpus = smp_num_probed;
}

void __devinit
smp_prepare_boot_cpu(void)
{
}

int __cpuinit
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        smp_boot_one_cpu(cpu, tidle);

        return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_online(cpu))
                        bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               (bogosum + 2500) / (500000/HZ),
               ((bogosum + 2500) / (5000/HZ)) % 100);
}
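
/* For reference: BogoMIPS is conventionally loops_per_jiffy * HZ /
   500000, so the first expression above prints the integer part and
   the second the first two decimal places; the +2500 is a small
   rounding fudge.  */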

void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        int cpu = smp_processor_id();
        unsigned long user = user_mode(regs);
        struct cpuinfo_alpha *data = &cpu_data[cpu];

        old_regs = set_irq_regs(regs);

        /* Record kernel PC.  */
        profile_tick(CPU_PROFILING);

        if (!--data->prof_counter) {
                /* We need to make like a normal interrupt -- otherwise
                   timer interrupts ignore the global interrupt lock,
                   which would be a Bad Thing.  */
                irq_enter();

                update_process_times(user);

                data->prof_counter = data->prof_multiplier;

                irq_exit();
        }
        set_irq_regs(old_regs);
}

int
setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}
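
/* prof_multiplier is the reload value for prof_counter above, i.e. the
   number of ticks between update_process_times() calls; it stays at 1
   because setup_profiling_timer() rejects every request.  */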

static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
        int i;

        mb();
        for_each_cpu(i, to_whom)
                set_bit(operation, &ipi_data[i].bits);

        mb();
        for_each_cpu(i, to_whom)
                wripir(i);
}
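
/* Ordering is the point above: the first mb() makes the caller's
   payload (e.g. call-function data) visible before the IPI bit is set;
   the second makes the bit visible before wripir() raises the actual
   interprocessor interrupt.  */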

void
handle_ipi(struct pt_regs *regs)
{
        int this_cpu = smp_processor_id();
        unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
        unsigned long ops;

#if 0
        DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
              this_cpu, *pending_ipis, regs->pc));
#endif

        mb();   /* Order interrupt and bit testing. */
        while ((ops = xchg(pending_ipis, 0)) != 0) {
                mb();   /* Order bit clearing and data access. */
                do {
                        unsigned long which;

                        /* Isolate and clear the lowest set bit.  */
                        which = ops & -ops;
                        ops &= ~which;
                        which = __ffs(which);

                        switch (which) {
                        case IPI_RESCHEDULE:
                                scheduler_ipi();
                                break;

                        case IPI_CALL_FUNC:
                                generic_smp_call_function_interrupt();
                                break;

                        case IPI_CALL_FUNC_SINGLE:
                                generic_smp_call_function_single_interrupt();
                                break;

                        case IPI_CPU_STOP:
                                halt();
                                /* halt() is not expected to return.  */

                        default:
                                printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
                                       this_cpu, which);
                                break;
                        }
                } while (ops);

                mb();   /* Order data access and bit testing. */
        }

        cpu_data[this_cpu].ipi_count++;

        if (hwrpb->txrdy)
                recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
        if (cpu == hard_smp_processor_id())
                printk(KERN_WARNING
                       "smp_send_reschedule: Sending IPI to self.\n");
#endif
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
        cpumask_t to_whom;

        cpumask_copy(&to_whom, cpu_possible_mask);
        cpumask_clear_cpu(smp_processor_id(), &to_whom);
#ifdef DEBUG_IPI_MSG
        if (hard_smp_processor_id() != boot_cpuid)
                printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
        send_ipi_message(&to_whom, IPI_CPU_STOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static void
ipi_imb(void *ignored)
{
        imb();
}

void
smp_imb(void)
{
        /* We must wait for the other processors to flush their icaches
           before continuing.  */
        if (on_each_cpu(ipi_imb, NULL, 1))
                printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
        tbia();
}

void
flush_tlb_all(void)
{
        /* Although we don't have any data to pass, we do want to
           synchronize with the other processors.  */
        if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
                printk(KERN_CRIT "flush_tlb_all: timed out\n");
        }
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm && !asn_locked())
                flush_tlb_current(mm);
        else
                flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if (mm == current->active_mm) {
                flush_tlb_current(mm);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
                printk(KERN_CRIT "flush_tlb_mm: timed out\n");
        }

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
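
/* A note on the single-user fast path above: zeroing mm->context[cpu]
   on the other CPUs invalidates the mm's ASN there, so the next
   activation on such a CPU goes through __load_new_mm_context() and
   allocates a fresh ASN rather than reusing stale TLB entries -- no
   IPI needed.  The same trick appears in flush_tlb_page() and
   flush_icache_user_range() below.  */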

struct flush_tlb_page_struct {
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
        struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
        struct mm_struct *mm = data->mm;

        if (mm == current->active_mm && !asn_locked())
                flush_tlb_current_page(mm, data->vma, data->addr);
        else
                flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct flush_tlb_page_struct data;
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (mm == current->active_mm) {
                flush_tlb_current_page(mm, vma, addr);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        data.vma = vma;
        data.mm = mm;
        data.addr = addr;

        if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
                printk(KERN_CRIT "flush_tlb_page: timed out\n");
        }

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        /* On the Alpha we always flush the whole user tlb.  */
        flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm && !asn_locked())
                __load_new_mm_context(mm);
        else
                flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                        unsigned long addr, int len)
{
        struct mm_struct *mm = vma->vm_mm;

        if ((vma->vm_flags & VM_EXEC) == 0)
                return;

        preempt_disable();

        if (mm == current->active_mm) {
                __load_new_mm_context(mm);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
                printk(KERN_CRIT "flush_icache_page: timed out\n");
        }

        preempt_enable();
}