/*
 * Userspace and service handler hooks
 *
 * Copyright (c) 2017 Linaro Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/syscall.h>

#include <zephyr/arch/arm/exception.h>

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
#include <zephyr/arch/cpu.h>
#endif

_ASM_FILE_PROLOGUE

GTEXT(z_arm_userspace_enter)
GTEXT(z_arm_do_syscall)
GTEXT(arch_user_string_nlen)
GTEXT(z_arm_user_string_nlen_fault_start)
GTEXT(z_arm_user_string_nlen_fault_end)
GTEXT(z_arm_user_string_nlen_fixup)
GDATA(_kernel)

/* Imports */
GDATA(_k_syscall_table)

/**
 *
 * User space entry function
 *
 * This function is the entry point to user mode from privileged execution.
 * The transition is one-way: threads that enter user mode do not return to
 * privileged execution later, except temporarily while servicing system
 * calls.
 *
 * The function is invoked as:
 * z_arm_userspace_enter(user_entry, p1, p2, p3,
 *                        stack_info.start, stack_info.size);
 */
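/*
 * Per the AAPCS, user_entry, p1, p2 and p3 arrive in r0-r3, while
 * stack_info.start and stack_info.size are passed on the caller's stack
 * ([sp, #0] and [sp, #4] on entry).
 */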
SECTION_FUNC(TEXT,z_arm_userspace_enter)
    /* move user_entry to lr */
    mov lr, r0

    /* prepare to set stack to privileged stack */
    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    /* move p1 to ip */
    mov ip, r1
    ldr r1, =_thread_offset_to_priv_stack_start
    ldr r0, [r0, r1]    /* priv stack ptr */
    ldr r1, =CONFIG_PRIVILEGED_STACK_SIZE
    add r0, r0, r1
    /* Restore p1 from ip */
    mov r1, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    ldr r0, [r0, #_thread_offset_to_priv_stack_start]    /* priv stack ptr */
    ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
    add r0, r0, ip
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
    ldr r0, [r0, #_thread_offset_to_priv_stack_start]    /* priv stack ptr */
    ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
    add r0, r0, ip

    ldr ip, =_kernel
    ldr ip, [ip, #_kernel_offset_to_current]
    str r0, [ip, #_thread_offset_to_priv_stack_end] /* priv stack end */
#endif

    /* store current stack pointer to ip
     * the current stack pointer is needed to retrieve
     * stack_info.start and stack_info.size
     */
    mov ip, sp

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
    mov sp, r0
#else
    /* set stack to privileged stack
     *
     * Note [applies only when CONFIG_BUILTIN_STACK_GUARD is enabled]:
     * modifying PSP via MSR instruction is not subject to stack limit
     * checking, so we do not need to clear PSPLIM before setting PSP.
     * The operation is safe since, by design, the privileged stack is
     * located in memory higher than the default (user) thread stack.
     */
    msr PSP, r0
#endif

#if defined(CONFIG_BUILTIN_STACK_GUARD)
    /* At this point the privileged stack is not yet protected by PSPLIM.
     * Since we have just switched to the top of the privileged stack, we
     * are safe, as long as the stack can accommodate the maximum exception
     * stack frame.
     */

    /* set stack pointer limit to the start of the priv stack */
    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]
    ldr r0, [r0, #_thread_offset_to_priv_stack_start]    /* priv stack ptr */
    msr PSPLIM, r0
#endif

    /* push args to stack */
    push {r1,r2,r3,lr}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    mov r1, ip
    push {r0,r1}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
    push {r0,ip}
#endif
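    /* The stack now holds, starting at sp: the privileged stack top (r0),
     * the original SP, then p1, p2, p3 and user_entry (pushed from lr).
     */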

    /* Re-program dynamic memory map.
     *
     * Important note:
     * z_arm_configure_dynamic_mpu_regions() may re-program the MPU Stack Guard
     * to guard the privileged stack against overflows (if building with option
     * CONFIG_MPU_STACK_GUARD). There is a risk of actually overflowing the
     * stack while doing the re-programming. We minimize the risk by placing
     * this call immediately after we have switched to the privileged stack,
     * so that the whole stack area is available for this critical operation.
     *
     * Note that the risk of overflow is higher if using the normal thread
     * stack, since we do not control how much stack is actually left when
     * the user invokes z_arm_userspace_enter().
     */
    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]
    bl z_arm_configure_dynamic_mpu_regions

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    pop {r0,r3}

    /* load up stack info from user stack */
    ldr r0, [r3]
    ldr r3, [r3, #4]
    mov ip, r3

    push {r0,r3}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
    pop {r0,ip}

    /* load up stack info from user stack */
    ldr r0, [ip]
    ldr ip, [ip, #4]

    push {r0,ip}
#endif

    /* clear the user stack area to clean out privileged data,
     * from right past the guard up to the end
     */
    mov r2, ip
#ifdef CONFIG_INIT_STACKS
    ldr r1,=0xaaaaaaaa
#else
    eors r1, r1
#endif
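    /* memset(stack_info.start, fill, stack_info.size): per the AAPCS,
     * r0 = destination, r1 = fill byte, r2 = length.
     */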
    bl memset

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    pop {r0, r1}
    mov ip, r1
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
    pop {r0,ip}
#endif

    /* r0 contains user stack start, ip contains user stack size */
    add r0, r0, ip   /* calculate top of stack */

    /* pop remaining arguments from stack before switching stacks */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    /* Use r4 to pop lr, then restore r4 */
    mov ip, r4
    pop {r1,r2,r3,r4}
    mov lr, r4
    mov r4, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
    pop {r1,r2,r3,lr}
#endif
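    /* At this point r0 holds the top of the user stack, r1-r3 hold p1-p3
     * and lr holds user_entry again.
     */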

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
    /*
     * set stack to user stack.  We are in SYSTEM state, so r13 and r14 are
     * shared with USER state
     */
    mov sp, r0
#else
#if defined(CONFIG_BUILTIN_STACK_GUARD)
    /*
     * Guard the default (user) stack until the thread drops privileges.
     *
     * Notes:
     * PSPLIM is configured *before* PSP switches to the default (user) stack.
     * This is safe, since the user stack is located, by design, in a lower
     * memory area compared to the privileged stack.
     *
     * However, we need to prevent a context switch from occurring, because
     * that would re-configure PSPLIM to guard the privileged stack; we
     * enforce a PendSV lock for this purpose.
     *
     * Between the PSPLIM update and the PSP switch, the privileged stack is
     * left unguarded; this is safe, as long as the privileged stack is
     * large enough to accommodate a maximum exception stack frame.
     */

    /* Temporarily store current IRQ locking status in ip */
    mrs ip, BASEPRI
    push {r0, ip}

    /* Lock PendSV while reprogramming PSP and PSPLIM */
    mov r0, #_EXC_PENDSV_PRIO_MASK
    msr BASEPRI_MAX, r0
    isb

    /* Set PSPLIM to guard the thread's user stack. */
    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]
    ldr r0, [r0, #_thread_offset_to_stack_info_start]
    msr PSPLIM, r0

    pop {r0, ip}
#endif

    /* set stack to user stack */
    msr PSP, r0
#endif

#if defined(CONFIG_BUILTIN_STACK_GUARD)
    /* Restore interrupt lock status */
    msr BASEPRI, ip
    isb
#endif

    /* restore r0 */
    mov r0, lr

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
    /* change processor mode to unprivileged, with all interrupts enabled. */
    msr CPSR_c, #MODE_USR
#else
    /* change processor mode to unprivileged */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    push {r0, r1, r2, r3}
    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]
    ldr r1, =_thread_offset_to_mode
    ldr r1, [r0, r1]
    movs r2, #1
    orrs r1, r1, r2
    mrs r3, CONTROL
    orrs r3, r3, r2
    mov ip, r3
    /* Store (unprivileged) mode in thread's mode state variable */
    ldr r2, =_thread_offset_to_mode
    str r1, [r0, r2]
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    push {r0, r1}
    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]
    ldr r1, [r0, #_thread_offset_to_mode]
    orrs r1, r1, #1
    mrs ip, CONTROL
    orrs ip, ip, #1
    /* Store (unprivileged) mode in thread's mode state variable */
    str r1, [r0, #_thread_offset_to_mode]
#endif
    dsb
    msr CONTROL, ip
#endif
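    /* Execution is now unprivileged on every path: USR mode on Cortex-R,
     * CONTROL.nPRIV set on Cortex-M.
     */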

    /* ISB is not strictly necessary here (stack pointer is not being
     * touched), but it's recommended to avoid executing pre-fetched
     * instructions with the previous privilege.
     */
    isb
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    pop {r0, r1, r2, r3}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    pop {r0, r1}
#endif

    /* jump to z_thread_entry */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    push {r0, r1}
    ldr r0, =z_thread_entry
    mov ip, r0
    pop {r0, r1}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
    ldr ip, =z_thread_entry
#endif
    bx ip

/**
 *
 * Userspace system call function
 *
 * This function is used to do system calls from unprivileged code. It is
 * responsible for the following:
 * 1) Fixing up bad syscalls
 * 2) Configuring the privileged stack and loading up stack arguments
 * 3) Dispatching the system call
 * 4) Restoring the stack and calling back to the caller of the SVC
 *
 */
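/*
 * Register usage on entry (see also the notes further below): r0-r5 hold the
 * system call arguments, r6 holds the call_id and r8 holds the return address
 * of the code that issued the SVC. Execution is still privileged at this
 * point.
 */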
SECTION_FUNC(TEXT, z_arm_do_syscall)

    /* Note [when using MPU-based stack guarding]:
     * The function is executing in privileged mode. This implies that we
     * must not use the thread's default unprivileged stack (i.e. push to
     * or pop from it), to avoid possible stack corruption.
     *
     * Rationale: since we execute in PRIV mode and no MPU guard
     * is guarding the end of the default stack, we won't be able
     * to detect any stack overflows.
     *
     * Note [when using built-in stack limit checking on ARMv8-M]:
     * At this point PSPLIM is already configured to guard the default (user)
     * stack, so pushing to the default thread's stack is safe.
     */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    /* save current stack pointer (user stack) */
    mov ip, sp
    /* temporarily push to user stack */
    push {r0,r1}
    /* setup privileged stack */
    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]
    adds r0, r0, #_thread_offset_to_priv_stack_start
    ldr r0, [r0]    /* priv stack ptr */
    ldr r1, =CONFIG_PRIVILEGED_STACK_SIZE
    add r0, r1

    /* Store current SP and LR at the beginning of the priv stack */
    subs r0, #8
    mov r1, ip
    str r1, [r0, #0]
    mov r1, lr
    str r1, [r0, #4]
    mov ip, r0
    /* Restore user stack and original r0, r1 */
    pop {r0, r1}

#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    /* setup privileged stack */
    ldr ip, =_kernel
    ldr ip, [ip, #_kernel_offset_to_current]
    ldr ip, [ip, #_thread_offset_to_priv_stack_start]    /* priv stack ptr */
    add ip, #CONFIG_PRIVILEGED_STACK_SIZE

    /* Store current SP and LR at the beginning of the priv stack */
    subs ip, #8
    str sp, [ip, #0]
    str lr, [ip, #4]
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
    /*
     * The SVC handler has already switched to the privileged stack.
     * Store the user SP and LR at the beginning of the priv stack.
     */
    ldr ip, =_kernel
    ldr ip, [ip, #_kernel_offset_to_current]
    ldr ip, [ip, #_thread_offset_to_sp_usr]
    push {ip, lr}
#endif
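    /* In all cases the caller's SP and LR are now saved at the top of the
     * privileged stack ([+0] = SP, [+4] = LR); on Cortex-M, ip points to
     * them and becomes the new PSP just below.
     */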

#if !defined(CONFIG_CPU_AARCH32_CORTEX_R)
    /* switch to privileged stack */
    msr PSP, ip
#endif

    /* Note (applies when using stack limit checking):
     * We do not need to lock IRQs after switching PSP to the privileged stack;
     * PSPLIM is guarding the default (user) stack, which, by design, is
     * located in a *lower* memory area. Since we switch to the top of the
     * privileged stack we are safe, as long as the stack can accommodate
     * the maximum exception stack frame.
     */

#if defined(CONFIG_BUILTIN_STACK_GUARD)
    /* Set stack pointer limit (needed in privileged mode) */
    ldr ip, =_kernel
    ldr ip, [ip, #_kernel_offset_to_current]
    ldr ip, [ip, #_thread_offset_to_priv_stack_start]    /* priv stack ptr */
    msr PSPLIM, ip
#endif

    /*
     * r0-r5 contain arguments
     * r6 contains call_id
     * r8 contains original LR
     */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    /* save r0, r1 to ip, lr */
    mov ip, r0
    mov lr, r1
    ldr r0, =K_SYSCALL_BAD
    cmp r6, r0
    bne valid_syscall

    /* BAD SYSCALL path */
    /* fixup stack frame on the privileged stack, adding ssf */
    mov r1, sp
    /* ssf is present in r1 (sp) */
    push {r1,lr}
    push {r4,r5}
    /* restore r0, r1 */
    mov r0, ip
    mov r1, lr
    b dispatch_syscall
valid_syscall:
    /* push ssf to privileged stack */
    mov r1, sp
    push {r1}
    /* push args to complete stack frame */
    push {r4,r5}

dispatch_syscall:
    /* original r0 is saved in ip */
    ldr r0, =_k_syscall_table
    lsls r6, #2
    add r0, r6
    ldr r0, [r0]    /* load table address */
    /* swap ip and r0, restore r1 from lr */
    mov r1, ip
    mov ip, r0
    mov r0, r1
    mov r1, lr
    /* execute function from dispatch table */
    blx ip

    /* restore LR
     * r0 holds the return value and needs to be preserved
     */
    mov ip, r0
    mov r0, sp
    ldr r0, [r0,#16]
    mov lr, r0
    /* Restore r0 */
    mov r0, ip

#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
    ldr ip, =K_SYSCALL_BAD
    cmp r6, ip
    bne valid_syscall

    /* BAD SYSCALL path */
    /* fixup stack frame on the privileged stack, adding ssf */
    mov ip, sp
    push {r4,r5,ip,lr}
    b dispatch_syscall

valid_syscall:
    /* push args to complete stack frame */
    mov ip, sp
    push {r4,r5,ip}

dispatch_syscall:
    ldr ip, =_k_syscall_table
    lsl r6, #2
    add ip, r6
    ldr ip, [ip]    /* load table address */

    /* execute function from dispatch table */
    blx ip

    /* restore LR */
    ldr lr, [sp,#16]
#endif
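    /* r0 now holds the return value of the syscall handler and must be
     * preserved; the caller's LR has been restored from the privileged stack.
     */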

#if defined(CONFIG_BUILTIN_STACK_GUARD)
    /*
     * Guard the default (user) stack until the thread drops privileges.
     *
     * Notes:
     * PSPLIM is configured *before* PSP switches to the default (user) stack.
     * This is safe, since the user stack is located, by design, in a lower
     * memory area compared to the privileged stack.
     *
     * However, we need to prevent a context switch from occurring, because
     * that would re-configure PSPLIM to guard the privileged stack; we
     * enforce a PendSV lock for this purpose.
     *
     * Between the PSPLIM update and the PSP switch, the privileged stack is
     * left unguarded; this is safe, as long as the privileged stack is
     * large enough to accommodate a maximum exception stack frame.
     */

    /* Temporarily store current IRQ locking status in r2 */
    mrs r2, BASEPRI

    /* Lock PendSV while reprogramming PSP and PSPLIM */
    mov r3, #_EXC_PENDSV_PRIO_MASK
    msr BASEPRI_MAX, r3
    isb

    /* Set PSPLIM to guard the thread's user stack. */
    ldr r3, =_kernel
    ldr r3, [r3, #_kernel_offset_to_current]
    ldr r3, [r3, #_thread_offset_to_stack_info_start]    /* stack_info.start */
    msr PSPLIM, r3
#endif

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    /* set stack back to unprivileged stack */
    mov ip, r0
    mov r0, sp
    ldr r0, [r0,#12]
    msr PSP, r0
    /* Restore r0 */
    mov r0, ip

#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    /* set stack back to unprivileged stack */
    ldr ip, [sp,#12]
    msr PSP, ip
#endif

#if defined(CONFIG_BUILTIN_STACK_GUARD)
    /* Restore interrupt lock status */
    msr BASEPRI, r2
    isb
#endif

    push {r0, r1}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    push {r2, r3}
    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]
    ldr r2, =_thread_offset_to_mode
    ldr r1, [r0, r2]
    movs r3, #1
    orrs r1, r1, r3
    /* Store (unprivileged) mode in thread's mode state variable */
    str r1, [r0, r2]
    dsb
    /* drop privileges by setting bit 0 in CONTROL */
    mrs r2, CONTROL
    orrs r2, r2, r3
    msr CONTROL, r2
    pop {r2, r3}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]
    ldr r1, [r0, #_thread_offset_to_mode]
    orrs r1, r1, #1
    /* Store (unprivileged) mode in thread's mode state variable */
    str r1, [r0, #_thread_offset_to_mode]
    dsb
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    /* drop privileges by setting bit 0 in CONTROL */
    mrs ip, CONTROL
    orrs ip, ip, #1
    msr CONTROL, ip
#endif
#endif
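    /* The Cortex-M paths are now unprivileged (CONTROL.nPRIV is set); on
     * Cortex-R the switch to USR mode happens just before the return below.
     */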

    /* ISB is not strictly necessary here (stack pointer is not being
     * touched), but it's recommended to avoid executing pre-fetched
     * instructions with the previous privilege.
     */
    isb
    pop {r0, r1}

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    /* Zero out volatile (caller-saved) registers so as not to leak state
     * from kernel mode. The C calling convention for the syscall handler
     * will restore the others to their original values.
     */
    movs r2, #0
    movs r3, #0

    /*
     * return to the original function that called SVC; add 1 to force
     * Thumb mode
     */

    /* Save return value temporarily to ip */
    mov ip, r0

    mov r0, r8
    movs r1, #1
    orrs r0, r0, r1

    /* swap ip, r0 */
    mov r1, ip
    mov ip, r0
    mov r0, r1
    movs r1, #0
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)

    /* Zero out volatile (caller-saved) registers so as not to leak state
     * from kernel mode. The C calling convention for the syscall handler
     * will restore the others to their original values.
     */
    mov r1, #0
    mov r2, #0
    mov r3, #0

    /*
     * return to the original function that called SVC; add 1 to force
     * Thumb mode
     */
    mov ip, r8
    orrs ip, ip, #1
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
    /* Restore user stack pointer */
    ldr ip, [sp,#12]
    mov sp, ip

    /* Zero out volatile (caller-saved) registers so as not to leak state
     * from kernel mode. The C calling convention for the syscall handler
     * will restore the others to their original values.
     */
    mov r1, #0
    mov r2, #0
    mov r3, #0

    /*
     * return to the original function that called SVC
     */
    mov ip, r8
    cps #MODE_USR
#endif
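    /* r0 carries the syscall return value back to the caller; ip holds the
     * return address (with the Thumb bit set on Cortex-M).
     */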

    bx ip


/*
 * size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
 */
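/*
 * The byte load between the fault_start/fault_end labels below may fault if
 * the string is not accessible; the fault handler is expected to resume
 * execution at z_arm_user_string_nlen_fixup, leaving the error value at -1.
 */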
SECTION_FUNC(TEXT, arch_user_string_nlen)
    push {r0, r1, r2, r4, r5, lr}
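    /* Frame layout after the push: [sp] = r0 (s), [sp, #4] = r1 (maxsize,
     * reused below as scratch for the error value), [sp, #8] = r2 (err_arg),
     * then the saved r4, r5 and lr.
     */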

    /* sp+4 is the error value, init to -1 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
    || defined(CONFIG_CPU_AARCH32_CORTEX_R)
    ldr r3, =-1
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    mov.w r3, #-1
#endif
    str r3, [sp, #4]

    /* Perform string length calculation */
    movs r3, #0    /* r3 is the counter */

strlen_loop:
z_arm_user_string_nlen_fault_start:
    /* r0 contains the string. r5 = *(r0 + r3). This could fault. */
    ldrb r5, [r0, r3]

z_arm_user_string_nlen_fault_end:
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
    cmp r5, #0
    beq strlen_done

    cmp r3, r1
    beq strlen_done

    adds r3, #1
    b strlen_loop
#else
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    cmp r5, #0
    beq strlen_done
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    cbz r5, strlen_done
#endif
    cmp r3, r1
    beq.n strlen_done

    adds r3, #1
    b.n strlen_loop
#endif

strlen_done:
    /* Move length calculation from r3 to r0 (return value register) */
    mov r0, r3

    /* Clear error value since we succeeded */
    movs r1, #0
    str r1, [sp, #4]

z_arm_user_string_nlen_fixup:
    /* Write error value to err pointer parameter */
    ldr r1, [sp, #4]
    str r1, [r2, #0]

    add sp, #12
    pop {r4, r5, pc}