/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <arch/chip.h>

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
}

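/*
 * Illustrative plain-C equivalent of the mask-merge above (assuming
 * ATOMIC_HASH_SIZE == 1 << ATOMIC_HASH_SHIFT, as the power-of-two
 * check in __init_atomic_per_cpu() suggests):
 *
 *	return &atomic_locks[((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1)];
 *
 * i.e. bits [3, 3 + ATOMIC_HASH_SHIFT) of the address select the lock.
 */
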
#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
}

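/*
 * Drop an atomic hash lock from the fault path: the word passed in
 * must be a valid lock pointer and must currently be held (contain 1).
 */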
void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	*irqlock_word = 0;
}

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}

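/*
 * The out-of-line __atomic_xchg()/__atomic_cmpxchg()/etc. helpers used
 * below take the value address plus its hashed lock and return a
 * struct __get_user; the .val member carries the old value read from
 * memory (see __atomic_bad_address() below for the .err side).
 */
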
int _atomic_xchg(atomic_t *v, int n)
{
	return __atomic_xchg(&v->counter, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(atomic_t *v, int i)
{
	return __atomic_xchg_add(&v->counter, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(atomic_t *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic_xchg_add_unless(&v->counter, __atomic_setup(v), u, a)
		.val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);

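/*
 * For reference: atomic_add_unless(v, a, u) adds "a" to *v unless *v
 * already equals "u", and callers compare the returned old value
 * against "u".  Passing "u" first lets the assembly treat it as the
 * comparison value, just as "o" comes first for _atomic_cmpxchg().
 */
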
int _atomic_cmpxchg(atomic_t *v, int o, int n)
{
	return __atomic_cmpxchg(&v->counter, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_or);

unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_andn);

unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_xor);

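/*
 * Note that the three bitop-style wrappers above take "unsigned long"
 * pointers but operate on 32-bit words via the (int *) casts; longs
 * and ints are the same size in this 32-bit configuration.
 */
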
u64 _atomic64_xchg(atomic64_t *v, u64 n)
{
	return __atomic64_xchg(&v->counter, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

u64 _atomic64_xchg_add(atomic64_t *v, u64 i)
{
	return __atomic64_xchg_add(&v->counter, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(&v->counter, __atomic_setup(v),
					  u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	return __atomic64_cmpxchg(&v->counter, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);

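/*
 * The 64-bit variants above serialize through the same mechanism as
 * the 32-bit ones: __atomic_setup() hashes the value's address into
 * the shared atomic_locks[] array regardless of operand size.
 */
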
/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space.  So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT.  Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}

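/*
 * Illustrative only (the actual call sites are the fault paths of the
 * "_atomic_xxx()" routines mentioned above): callers are expected to
 * propagate the error roughly as
 *
 *	struct __get_user g = __atomic_bad_address(uaddr);
 *	if (g.err)
 *		return g.err;		(futex fault: -EFAULT)
 */
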
void __init __init_atomic_per_cpu(void)
{
	/* Validate power-of-two and "bigger than cpus" assumption */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster.  You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
}
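
/*
 * Worked example (illustrative values; the real constants come from
 * the architecture headers): with 64 KB pages, PAGE_SIZE >> 3 is 8192,
 * so the checks above require 8192 <= ATOMIC_HASH_SIZE <= 16384
 * (PAGE_SIZE / sizeof(int)), with ATOMIC_HASH_SIZE a power of two.
 */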