#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
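	/*
	 * A minimal conceptual equivalent (illustrative only; the real
	 * selection happens once at boot via alternatives patching of
	 * the call target, not through a runtime branch):
	 *
	 *	if (static_cpu_has(X86_FEATURE_ERMS))
	 *		ret = copy_user_enhanced_fast_string(to, from, len);
	 *	else if (static_cpu_has(X86_FEATURE_REP_GOOD))
	 *		ret = copy_user_generic_string(to, from, len);
	 *	else
	 *		ret = copy_user_generic_unrolled(to, from, len);
	 */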
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

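/*
 * For compile-time-constant sizes up to 16 bytes the copy is open-coded
 * with mov instructions via __get_user_asm(); everything else falls back
 * to copy_user_generic().  No access_ok() check is performed here.
 * Returns the number of bytes that could not be copied.
 */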
static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
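	/*
	 * Sizes 10 and 16 are done as an 8-byte access followed by a
	 * 2- or 8-byte access; the second access is skipped when the
	 * first one faults, and the error value counts the bytes that
	 * were still uncopied at the point of the fault.
	 */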
	case 10:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

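/*
 * Illustrative use (hypothetical caller; @uptr, struct foo and @karg are
 * made up here, and @uptr is assumed to have been range-checked with
 * access_ok() already, as copy_from_user() would do):
 *
 *	struct foo karg;
 *
 *	if (__copy_from_user(&karg, uptr, sizeof(karg)))
 *		return -EFAULT;
 */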
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}

static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
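			/* Compiler barrier: keep the two stores in order. */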
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
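			/* Compiler barrier: keep the two stores in order. */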
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

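/*
 * Illustrative use (hypothetical caller; @ubuf and @kstat are made up,
 * and @ubuf is assumed to have passed access_ok(), as copy_to_user()
 * would guarantee):
 *
 *	if (__copy_to_user(ubuf, &kstat, sizeof(kstat)))
 *		return -EFAULT;
 */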
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}

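/*
 * Copy from one user buffer to another; both pointers are user addresses,
 * so either the load or the store may fault.  Small constant sizes bounce
 * through a temporary instead of calling out to copy_user_generic().
 */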
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	}
	case 2: {
		u16 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	}
	case 4: {
		u32 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	}
	case 8: {
		u64 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

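/*
 * The _inatomic variants omit might_fault() and so may be used in atomic
 * context with page faults disabled; the caller must cope with a short
 * copy being reported instead of the fault being serviced.
 */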
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}

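/*
 * Non-temporal copy that bypasses the CPU caches.  When @zerorest is set,
 * the remainder of @dst is zeroed if a fault cuts the copy short (as the
 * name and the copy_from_user-style caller below suggest); the inatomic
 * wrapper passes 0 and leaves the tail untouched.
 */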
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

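/*
 * Fixup helper used by the assembly copy routines on a fault: retries the
 * remainder byte by byte and returns the number of bytes left uncopied.
 */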
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */