// SPDX-License-Identifier: GPL-2.0
/*
 *  Standard user space access functions based on mvcp/mvcs and doing
 *  interesting things in the secondary space mode.
 *
 *    Copyright IBM Corp. 2006,2014
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/jump_label.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>

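/*
 * mvcos (facility 27) is guaranteed to be available when the kernel is
 * built for z10 or newer machines. For older build targets probe the
 * facility once at boot and cache the result in a static key.
 */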
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
static DEFINE_STATIC_KEY_FALSE(have_mvcos);

static int __init uaccess_init(void)
{
	if (test_facility(27))
		static_branch_enable(&have_mvcos);
	return 0;
}
early_initcall(uaccess_init);

static inline int copy_with_mvcos(void)
{
	if (static_branch_likely(&have_mvcos))
		return 1;
	return 0;
}
#else
static inline int copy_with_mvcos(void)
{
	return 1;
}
#endif

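/*
 * set_fs() switches the address space used for user space accesses:
 * the primary ASCE (control register 1) is loaded with the user or
 * kernel ASCE, and for the *_SACF variants (low bit of the segment
 * value set) the secondary ASCE (control register 7) is set up as well
 * so that the sacf based copy routines reach the right address space.
 */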
void set_fs(mm_segment_t fs)
{
	current->thread.mm_segment = fs;
	if (fs == USER_DS) {
		__ctl_load(S390_lowcore.user_asce, 1, 1);
		clear_cpu_flag(CIF_ASCE_PRIMARY);
	} else {
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
		set_cpu_flag(CIF_ASCE_PRIMARY);
	}
	if (fs & 1) {
		if (fs == USER_DS_SACF)
			__ctl_load(S390_lowcore.user_asce, 7, 7);
		else
			__ctl_load(S390_lowcore.kernel_asce, 7, 7);
		set_cpu_flag(CIF_ASCE_SECONDARY);
	}
}
EXPORT_SYMBOL(set_fs);

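/*
 * Switch the current segment to its SACF variant: load the kernel ASCE
 * into the primary ASCE (control register 1) if necessary and point the
 * secondary ASCE (control register 7) at the user (or kernel) address
 * space. Returns the previous segment, which the caller must hand back
 * to disable_sacf_uaccess() when it is done.
 */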
mm_segment_t enable_sacf_uaccess(void)
{
	mm_segment_t old_fs;
	unsigned long asce, cr;

	old_fs = current->thread.mm_segment;
	if (old_fs & 1)
		return old_fs;
	current->thread.mm_segment |= 1;
	asce = S390_lowcore.kernel_asce;
	if (likely(old_fs == USER_DS)) {
		__ctl_store(cr, 1, 1);
		if (cr != S390_lowcore.kernel_asce) {
			__ctl_load(S390_lowcore.kernel_asce, 1, 1);
			set_cpu_flag(CIF_ASCE_PRIMARY);
		}
		asce = S390_lowcore.user_asce;
	}
	__ctl_store(cr, 7, 7);
	if (cr != asce) {
		__ctl_load(asce, 7, 7);
		set_cpu_flag(CIF_ASCE_SECONDARY);
	}
	return old_fs;
}
EXPORT_SYMBOL(enable_sacf_uaccess);

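/*
 * Revert to the segment that was active before enable_sacf_uaccess().
 * When returning to plain USER_DS on a machine with mvcos the user ASCE
 * is put back into the primary ASCE right away.
 */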
void disable_sacf_uaccess(mm_segment_t old_fs)
{
	current->thread.mm_segment = old_fs;
	if (old_fs == USER_DS && test_facility(27)) {
		__ctl_load(S390_lowcore.user_asce, 1, 1);
		clear_cpu_flag(CIF_ASCE_PRIMARY);
	}
}
EXPORT_SYMBOL(disable_sacf_uaccess);

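/*
 * Copy from user space with mvcos (MOVE WITH OPTIONAL SPECIFICATIONS).
 * Register 0 holds the operand access specification that lets the
 * source operand be fetched from the user address space, so no address
 * space switch is needed. On a fault the copy is retried with the
 * length reduced to the end of the current 4K page; the return value is
 * the number of bytes that could not be copied.
 */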
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
						 unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x01UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
		"6: jz    4f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j     0b\n"
		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   5f\n"
		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
		"7: slgr  %0,%4\n"
		"   j     5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

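/*
 * Fallback for machines without mvcos: switch to the SACF segment and
 * use mvcp (MOVE TO PRIMARY), which copies from the secondary address
 * space (user) to the primary address space (kernel), 256 bytes per
 * iteration. On a fault the copy is limited to the current 4K page of
 * the user source so the exact number of uncopied bytes can be returned.
 */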
static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
						unsigned long size)
{
	unsigned long tmp1, tmp2;
	mm_segment_t old_fs;

	old_fs = enable_sacf_uaccess();
	tmp1 = -256UL;
	asm volatile(
		"   sacf  0\n"
		"0: mvcp  0(%0,%2),0(%1),%3\n"
		"7: jz    5f\n"
		"1: algr  %0,%3\n"
		"   la    %1,256(%1)\n"
		"   la    %2,256(%2)\n"
		"2: mvcp  0(%0,%2),0(%1),%3\n"
		"8: jnz   1b\n"
		"   j     5f\n"
		"3: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
		"   lghi  %3,-4096\n"
		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   6f\n"
		"4: mvcp  0(%4,%2),0(%1),%3\n"
		"9: slgr  %0,%4\n"
		"   j     6f\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	disable_sacf_uaccess(old_fs);
	return size;
}

unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_from_user_mvcos(to, from, n);
	return copy_from_user_mvcp(to, from, n);
}
EXPORT_SYMBOL(raw_copy_from_user);

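/*
 * Copy to user space with mvcos. Same structure as copy_from_user_mvcos(),
 * only the operand access specification in register 0 now directs the
 * destination operand to the user address space.
 */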
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
					       unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x010000UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
		"6: jz    4f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j     0b\n"
		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   5f\n"
		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
		"7: slgr  %0,%4\n"
		"   j     5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

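/*
 * Fallback for machines without mvcos: mvcs (MOVE TO SECONDARY) copies
 * from the primary address space (kernel) to the secondary address
 * space (user), again in 256 byte chunks with a page-bounded retry on
 * faults.
 */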
static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
					      unsigned long size)
{
	unsigned long tmp1, tmp2;
	mm_segment_t old_fs;

	old_fs = enable_sacf_uaccess();
	tmp1 = -256UL;
	asm volatile(
		"   sacf  0\n"
		"0: mvcs  0(%0,%1),0(%2),%3\n"
		"7: jz    5f\n"
		"1: algr  %0,%3\n"
		"   la    %1,256(%1)\n"
		"   la    %2,256(%2)\n"
		"2: mvcs  0(%0,%1),0(%2),%3\n"
		"8: jnz   1b\n"
		"   j     5f\n"
		"3: la    %4,255(%1)\n" /* %4 = ptr + 255 */
		"   lghi  %3,-4096\n"
		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   6f\n"
		"4: mvcs  0(%4,%1),0(%2),%3\n"
		"9: slgr  %0,%4\n"
		"   j     6f\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	disable_sacf_uaccess(old_fs);
	return size;
}

unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_to_user_mvcos(to, from, n);
	return copy_to_user_mvcs(to, from, n);
}
EXPORT_SYMBOL(raw_copy_to_user);

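/*
 * Copy within user space with mvcos: both operands are accessed in the
 * user address space. On a fault the remaining size is returned as is,
 * without the page-bounded retry of the other mvcos helpers (see the
 * FIXME below).
 */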
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
					       unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x010001UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	/* FIXME: copy with reduced length. */
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
		"   jz	  2f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j	  0b\n"
		"2: slgr  %0,%0\n"
		"3: \n"
		EX_TABLE(0b,3b)
		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

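/*
 * Fallback for user-to-user copies without mvcos: sacf 256 switches to
 * secondary space mode so that both operands of a plain mvc go through
 * the user ASCE. The copy runs in 256 byte chunks; the tail is done by
 * an execute (ex) of the single byte mvc at label 1 with the remaining
 * length.
 */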
static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
					     unsigned long size)
{
	mm_segment_t old_fs;
	unsigned long tmp1;

	old_fs = enable_sacf_uaccess();
	asm volatile(
		"   sacf  256\n"
		"   aghi  %0,-1\n"
		"   jo	  5f\n"
		"   bras  %3,3f\n"
		"0: aghi  %0,257\n"
		"1: mvc	  0(1,%1),0(%2)\n"
		"   la	  %1,1(%1)\n"
		"   la	  %2,1(%2)\n"
		"   aghi  %0,-1\n"
		"   jnz	  1b\n"
		"   j	  5f\n"
		"2: mvc	  0(256,%1),0(%2)\n"
		"   la	  %1,256(%1)\n"
		"   la	  %2,256(%2)\n"
		"3: aghi  %0,-256\n"
		"   jnm	  2b\n"
		"4: ex	  %0,1b-0b(%3)\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
		: : "cc", "memory");
	disable_sacf_uaccess(old_fs);
	return size;
}

unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_in_user_mvcos(to, from, n);
	return copy_in_user_mvc(to, from, n);
}
EXPORT_SYMBOL(raw_copy_in_user);

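/*
 * Clear user memory with mvcos by copying from empty_zero_page in
 * chunks of up to 4K. Faults are handled like in the copy routines: the
 * clear is retried up to the next page boundary of the destination and
 * the number of bytes left untouched is returned.
 */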
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x010000UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
		"   jz	  4f\n"
		"1: algr  %0,%2\n"
		"   slgr  %1,%2\n"
		"   j	  0b\n"
		"2: la	  %3,4095(%1)\n"/* %3 = to + 4095 */
		"   nr	  %3,%2\n"	/* %3 = (to + 4095) & -4096 */
		"   slgr  %3,%1\n"
		"   clgr  %0,%3\n"	/* copy crosses next page boundary? */
		"   jnh	  5f\n"
		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
		"   slgr  %0,%3\n"
		"   j	  5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
	return size;
}

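/*
 * Fallback without mvcos: clear user memory in secondary space mode by
 * xc'ing the destination with itself, 256 bytes per iteration, with an
 * execute of the single byte xc for the remainder. The fault handler
 * limits the clear to the current 4K page of the destination.
 */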
static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
{
	mm_segment_t old_fs;
	unsigned long tmp1, tmp2;

	old_fs = enable_sacf_uaccess();
	asm volatile(
		"   sacf  256\n"
		"   aghi  %0,-1\n"
		"   jo    5f\n"
		"   bras  %3,3f\n"
		"   xc    0(1,%1),0(%1)\n"
		"0: aghi  %0,257\n"
		"   la    %2,255(%1)\n" /* %2 = ptr + 255 */
		"   srl   %2,12\n"
		"   sll   %2,12\n"	/* %2 = (ptr + 255) & -4096 */
		"   slgr  %2,%1\n"
		"   clgr  %0,%2\n"	/* clear crosses next page boundary? */
		"   jnh   5f\n"
		"   aghi  %2,-1\n"
		"1: ex    %2,0(%3)\n"
		"   aghi  %2,1\n"
		"   slgr  %0,%2\n"
		"   j     5f\n"
		"2: xc    0(256,%1),0(%1)\n"
		"   la    %1,256(%1)\n"
		"3: aghi  %0,-256\n"
		"   jnm   2b\n"
		"4: ex    %0,0(%3)\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	disable_sacf_uaccess(old_fs);
	return size;
}

unsigned long __clear_user(void __user *to, unsigned long size)
{
	if (copy_with_mvcos())
		return clear_user_mvcos(to, size);
	return clear_user_xc(to, size);
}
EXPORT_SYMBOL(__clear_user);

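/*
 * Find the length of a user space string with srst (SEARCH STRING):
 * register 0 holds the character to look for, here the terminating NUL.
 * The search runs in secondary space mode; the result includes the NUL
 * byte, and 0 is returned if a fault occurs before it is found.
 */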
static inline unsigned long strnlen_user_srst(const char __user *src,
					      unsigned long size)
{
	register unsigned long reg0 asm("0") = 0;
	unsigned long tmp1, tmp2;

	asm volatile(
		"   la    %2,0(%1)\n"
		"   la    %3,0(%0,%1)\n"
		"   slgr  %0,%0\n"
		"   sacf  256\n"
		"0: srst  %3,%2\n"
		"   jo    0b\n"
		"   la    %0,1(%3)\n"	/* strnlen_user result includes \0 */
		"   slgr  %0,%1\n"
		"1: sacf  768\n"
		EX_TABLE(0b,1b)
		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

unsigned long __strnlen_user(const char __user *src, unsigned long size)
{
	mm_segment_t old_fs;
	unsigned long len;

	if (unlikely(!size))
		return 0;
	old_fs = enable_sacf_uaccess();
	len = strnlen_user_srst(src, size);
	disable_sacf_uaccess(old_fs);
	return len;
}
EXPORT_SYMBOL(__strnlen_user);

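/*
 * Copy a NUL terminated string from user space. The copy is done in
 * chunks that never cross an L1 cache line of the source, each chunk is
 * scanned with strnlen() for the terminator, and the loop stops once the
 * NUL is found or size bytes were copied. Returns the length of the
 * string without the NUL, or -EFAULT if the user memory is not readable.
 */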
long __strncpy_from_user(char *dst, const char __user *src, long size)
{
	size_t done, len, offset, len_str;

	if (unlikely(size <= 0))
		return 0;
	done = 0;
	do {
		offset = (size_t)src & (L1_CACHE_BYTES - 1);
		len = min(size - done, L1_CACHE_BYTES - offset);
		if (copy_from_user(dst, src, len))
			return -EFAULT;
		len_str = strnlen(dst, len);
		done += len_str;
		src += len_str;
		dst += len_str;
	} while ((len_str == len) && (done < size));
	return done;
}
EXPORT_SYMBOL(__strncpy_from_user);