1/*
2   Copyright (c) 2015-2024, Synopsys, Inc. All rights reserved.
3
4   Redistribution and use in source and binary forms, with or without
5   modification, are permitted provided that the following conditions are met:
6
7   1) Redistributions of source code must retain the above copyright notice,
8   this list of conditions and the following disclaimer.
9
10   2) Redistributions in binary form must reproduce the above copyright notice,
11   this list of conditions and the following disclaimer in the documentation
12   and/or other materials provided with the distribution.
13
14   3) Neither the name of the Synopsys, Inc., nor the names of its contributors
15   may be used to endorse or promote products derived from this software
16   without specific prior written permission.
17
18   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28   POSSIBILITY OF SUCH DAMAGE.
29*/
30
31/* This implementation is optimized for performance.  For code size a generic
32   implementation of this function from newlib/libc/string/strcmp.c will be
33   used.  */
34#include <picolibc.h>
35
36#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED) \
37    && !defined (__ARC_RF16__)
38
39#include "asm.h"
40
41/* This is optimized primarily for the ARC700.
42   It would be possible to speed up the loops by one cycle / word
43   respective one cycle / byte by forcing double source 1 alignment, unrolling
44   by a factor of two, and speculatively loading the second word / byte of
45   source 1; however, that would increase the overhead for loop setup / finish,
46   and strcmp might often terminate early.  */
47#ifndef __ARCHS__
48
/* int strcmp (const char *s1, const char *s2)
   In:   r0 = s1, r1 = s2 (NUL-terminated strings)
   Out:  r0 = 0 if equal; > 0 if s1 sorts after s2; < 0 if s1 sorts
         before s2.  The word path returns 1 / 0x80000001, the byte
         path returns the difference of the first mismatching bytes --
         both satisfy the strcmp sign contract.
   Uses: r2-r5, r12 as scratch (all caller-saved in the ARC ABI --
         NOTE(review): confirm against the toolchain's ABI); r0/r1 are
         consumed.  Returns via blink.  */
ENTRY (strcmp)
	or	r2,r0,r1	; merge low address bits of both pointers
	bmsk_s	r2,r2,1		; keep bits 1:0 -> 0 iff both 4-byte aligned
	brne_l	r2,0,.Lcharloop	; any misalignment -> byte-at-a-time loop
	mov_s	r12,0x01010101	; per-byte 0x01 for the zero-byte test
	ror	r5,r12		; r5 = 0x80808080 (0x01010101 rotated right 1)
.Lwordloop:
	ld.ab	r2,[r0,4]	; r2 = next word of s1, post-increment s1
	ld.ab	r3,[r1,4]	; r3 = next word of s2, post-increment s2
	nop_s			; NOTE(review): presumably fills the load
				; delay before r2 is used (ARC700
				; scheduling) -- confirm
	sub	r4,r2,r12	; SWAR zero-byte test on s1's word:
	bic	r4,r4,r2	; (x - 0x01010101) & ~x & 0x80808080
	and	r4,r4,r5	; r4 != 0 iff some byte of x may be zero
				; (can false-positive on 0x01 bytes below
				; a真 zero's borrow -- see .Lfound0)
	brne_l	r4,0,.Lfound0	; possible NUL in s1's word
	breq	r2,r3,.Lwordloop ; words equal, no NUL -> keep scanning
	/* Words differ and s1's word contains no NUL byte.  */
#ifdef	__LITTLE_ENDIAN__
	/* On little-endian the low byte is the first string byte, so a
	   whole-word compare would weight the wrong end; isolate the
	   least significant (i.e. first) differing byte instead.  */
	xor	r0,r2,r3	; mask for difference
	sub_s	r1,r0,1
	bic_s	r0,r0,r1	; mask for least significant difference bit
	sub	r1,r5,r0
	xor	r0,r5,r1	; mask for least significant difference byte
	and_s	r2,r2,r0	; isolate first differing byte of s1 ...
	and_s	r3,r3,r0	; ... and of s2
#endif /* LITTLE ENDIAN */
	/* (Big-endian: the most significant -- i.e. first -- differing
	   byte already dominates an unsigned whole-word compare.)  */
	cmp_s	r2,r3		; unsigned compare decides the sign
	mov_s	r0,1		; assume s1 > s2
	j_s.d	[blink]
	bset.lo	r0,r0,31	; (delay slot) s1 < s2 -> 0x80000001 (< 0)

	.balign	4
#ifdef __LITTLE_ENDIAN__
.Lfound0:
	/* s1's word may contain a NUL.  Fold the zero indicator into the
	   difference mask so the terminator counts as a difference, then
	   isolate the first byte where the strings differ or end.  A
	   false-positive 0x01 indicator is harmless here: any earlier
	   real difference or NUL is less significant and wins.  */
	xor	r0,r2,r3	; mask for difference
	or	r0,r0,r4	; or in zero indicator
	sub_s	r1,r0,1
	bic_s	r0,r0,r1	; mask for least significant difference bit
	sub	r1,r5,r0
	xor	r0,r5,r1	; mask for least significant difference byte
	and_s	r2,r2,r0
	and_s	r3,r3,r0
	sub.f	r0,r2,r3	; equal (both NUL here) -> return 0
	mov.hi	r0,1		; unsigned r2 > r3 -> return 1
	j_s.d	[blink]
	bset.lo	r0,r0,31	; (delay slot) r2 < r3 -> 0x80000001 (< 0)
#else /* BIG ENDIAN */
	/* The zero-detection above can mis-detect 0x01 bytes as zeroes
	   because of carry-propagation from a less significant zero byte.
	   We can compensate for this by checking that bit0 is zero.
	   This compensation is not necessary in the step where we
	   get a low estimate for r2, because in any affected bytes
	   we already have 0x00 or 0x01, which will remain unchanged
	   when bit 7 is cleared.  */
	.balign	4
.Lfound0:
#ifdef __ARC_BARREL_SHIFTER__
	lsr	r0,r4,8		; shift zero indicators down one byte
	lsr_s	r1,r2		; r2 >> 1: exposes bit0 for the 0x01 check
	bic_s	r2,r2,r0	; get low estimate for r2 and get ...
	bic_s	r0,r0,r1	; <this is the adjusted mask for zeros>
	or_s	r3,r3,r0	; ... high estimate r3 so that r2 > r3 will ...
	cmp_s	r3,r2		; ... be independent of trailing garbage
	or_s	r2,r2,r0	; likewise for r3 > r2
	bic_s	r3,r3,r0
	rlc	r0,0		; r0 := r2 > r3 ? 1 : 0 (carry from cmp above)
	cmp_s	r2,r3
	j_s.d	[blink]
	bset.lo	r0,r0,31	; (delay slot) r2 < r3 -> 0x80000001 (< 0)
#else /* __ARC_BARREL_SHIFTER__ */
	/* No barrel shifter: rewind both pointers to the start of the
	   word that held the (possible) NUL and fall through to the
	   byte loop to finish the comparison.  */
	sub_s	r0,r0,4
	sub_s	r1,r1,4
#endif /* __ARC_BARREL_SHIFTER__ */
#endif /* ENDIAN */

	.balign	4
.Lcharloop:
	/* Byte-at-a-time loop: used for unaligned inputs and as the
	   big-endian/no-barrel-shifter tail.  */
	ldb.ab	r2,[r0,1]	; r2 = *s1++ (zero-extended byte)
	ldb.ab	r3,[r1,1]	; r3 = *s2++
	nop_s			; NOTE(review): presumably a load-delay
				; filler, as in the word loop -- confirm
	breq_l	r2,0,.Lcmpend	; end of s1 -> settle on the final bytes
	breq	r2,r3,.Lcharloop ; bytes equal (and nonzero) -> continue
.Lcmpend:
	j_s.d	[blink]
	sub	r0,r2,r3	; (delay slot) return byte difference:
				; 0 if both NUL, signed otherwise
ENDFUNC (strcmp)
134#endif /* !__ARCHS__ */
135
136#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
137