// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  */
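/* Each CPU owns one word of pending-message bits: senders set bits with
   set_bit() in send_ipi_message(), and the target CPU consumes the whole
   word atomically (xchg) in handle_ipi().  */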
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/* Secondary bring-up handshake: -1 while a secondary is being started
   (it must wait), 0 once it may calibrate, 1 when it is alive.  */
static int smp_secondary_alive = 0;

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

/*
 * Called by both boot and secondaries to move global data into
 *  per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

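/* Spin until the boot CPU drops smp_secondary_alive from -1 back to 0
   (see smp_boot_one_cpu), telling us it is safe to go on and calibrate.  */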
static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_online(cpuid)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}
	set_cpu_online(cpuid, true);

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);
	init_clockevent();

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin)
		alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay. */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

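	/* The console's per-cpu ipc_buffer carries the message length in
	   the first quadword and the text after it.  (When the console
	   replies, it posts its length in the high half of that quadword;
	   see recv_secondary_console_msg.)  */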
	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		  ((char*)hwrpb
		   + hwrpb->processor_offset
		   + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[1];
			cp2 = buf;
			memcpy(cp2, cp1, cnt);
			cp2[cnt] = '\0';

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

/*
 * Bring one cpu online.
 */
static int
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	unsigned long timeout;

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
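			/* 0x1cc selects the Processor Available, Processor
			   Present, and PALcode Valid/Memory-Valid/Loaded
			   bits of the SRM per-cpu state flags (the same
			   flag word secondary_cpu_start twiddles with 0x22
			   and ~1); only CPUs with all five set are usable.  */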
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				set_cpu_possible(i, true);
				set_cpu_present(i, true);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		init_cpu_possible(cpumask_of(boot_cpuid));
		init_cpu_present(cpumask_of(boot_cpuid));
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void
smp_prepare_boot_cpu(void)
{
}

int
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	smp_boot_one_cpu(cpu, tidle);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

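	/* loops_per_jiffy * HZ gives calibration loops per second; one
	   BogoMIPS is 500000 of those, hence the divisors below (the
	   +2500 merely rounds the printed result).  */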
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}

int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

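	/* The first mb() orders the caller's stores before the message
	   bits become visible; the second orders the bit setting before
	   the interprocessor interrupt is actually raised.  */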
	mb();
	for_each_cpu(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu(i, to_whom)
		wripir(i);
}

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
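	/* Atomically grab and clear every pending message bit; any IPI
	   raised after the xchg re-triggers the interrupt or is caught
	   by re-testing in this outer loop.  */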
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

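		/* ops & -ops isolates the lowest set bit, so messages
		   are serviced in ascending bit order.  */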
		which = ops & -ops;
		ops &= ~which;
		which = __ffs(which);

		switch (which) {
		case IPI_RESCHEDULE:
			scheduler_ipi();
			break;

		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;

		case IPI_CPU_STOP:
			halt();
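			/* halt() drops into PALcode/the console and does
			   not return, so no break is needed here.  */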

		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom;
	cpumask_copy(&to_whom, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpuid)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(&to_whom, IPI_CPU_STOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for other processors to flush their icaches
	   before continuing. */
	on_each_cpu(ipi_imb, NULL, 1);
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
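			/* Sole user: rather than IPI everyone, zap the
			   stale per-cpu contexts so any other CPU must
			   allocate a fresh ASN if it ever runs this mm.  */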
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	smp_call_function(ipi_flush_tlb_mm, mm, 1);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct *mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	smp_call_function(ipi_flush_tlb_page, &data, 1);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}
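
/* Note there is no explicit icache-flush PALcall above: allocating a
   fresh mm context via __load_new_mm_context() retires stale icache
   lines, the icache being effectively tagged by ASN.  */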

void
flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	smp_call_function(ipi_flush_icache_page, mm, 1);

	preempt_enable();
}