/* Optimized memset implementation for PowerPC64.
   Copyright (C) 1997-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <bp-sym.h>
#include <bp-asm.h>

	.section	".toc","aw"
.LC0:
	.tc __cache_line_size[TC],__cache_line_size
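/* TOC entry for __cache_line_size; the C library startup code is expected
   to set that variable to the data cache block size, and it is read below
   to decide whether the dcbz path can be used.  */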
	.section	".text"
	.align 2

/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]);
   Returns 's'.

   The memset is done in three sizes: byte (8 bits), doubleword (64 bits),
   and 32-byte blocks (four doublewords).  There is a special case for
   setting whole cache lines to 0, to take advantage of the dcbz
   instruction.  */
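
/* Rough C-like sketch of the flow below (illustrative only, not part of
   the build; it just summarizes the labels used in this file):

     fill bytes/halfwords/words until the pointer is 8-byte aligned;
     fill doublewords until it is 32-byte aligned;
     if (c == 0 && __cache_line_size != 0)
       clear whole cache lines with dcbz;            // L(zloopstart)
     else
       store four doublewords (32 bytes) per loop;   // L(nondcbz)
     finish the remaining 0-31 bytes;                // L(medium)
     return the original pointer.  */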

EALIGN (BP_SYM (memset), 5, 0)
	CALL_MCOUNT 3

#define rTMP	r0
#define rRTN	r3	/* Initial value of 1st argument.  */
#if __BOUNDED_POINTERS__
# define rMEMP0	r4	/* Original value of 1st arg.  */
# define rCHR	r5	/* Char to set in each byte.  */
# define rLEN	r6	/* Length of region to set.  */
# define rMEMP	r10	/* Address at which we are storing.  */
#else
# define rMEMP0	r3	/* Original value of 1st arg.  */
# define rCHR	r4	/* Char to set in each byte.  */
# define rLEN	r5	/* Length of region to set.  */
# define rMEMP	r6	/* Address at which we are storing.  */
#endif
#define rALIGN	r7	/* Number of bytes we are setting now (when aligning). */
#define rMEMP2	r8

#define rNEG64	r8	/* Constant -64 for clearing with dcbz.  */
#define rCLS	r8	/* Cache line size obtained from static.  */
#define rCLM	r9	/* Cache line size mask to check for cache alignment.  */
L(_memset):
#if __BOUNDED_POINTERS__
	cmpldi	cr1, rRTN, 0
	CHECK_BOUNDS_BOTH_WIDE (rMEMP0, rTMP, rTMP2, rLEN)
	beq	cr1, L(b0)
	STORE_RETURN_VALUE (rMEMP0)
	STORE_RETURN_BOUNDS (rTMP, rTMP2)
L(b0):
#endif
/* Take care of the case for size <= 8.  */
	cmpldi	cr1, rLEN, 8
	andi.	rALIGN, rMEMP0, 7
	mr	rMEMP, rMEMP0
	ble-	cr1, L(small)

/* Align to doubleword boundary.  */
	cmpldi	cr5, rLEN, 31
	rlwimi	rCHR, rCHR, 8, 16, 23 /* Replicate byte to halfword.  */
	beq+	L(aligned2)
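/* The mtcrf below copies the low four address bits into CR7 so each
   misalignment case can be branched on directly: CR bit 31 reflects the
   1s bit of the address, bit 30 the 2s bit, bit 29 the 4s bit, and
   bit 28 the 8s bit.  The cror below then repurposes bit 28 to mean
   "not word aligned".  */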
	mtcrf	0x01, rMEMP0
	subfic	rALIGN, rALIGN, 8
	cror	28,30,31		/* Detect odd word aligned.  */
	add	rMEMP, rMEMP, rALIGN
	sub	rLEN, rLEN, rALIGN
	rlwimi	rCHR, rCHR, 16, 0, 15 /* Replicate halfword to word.  */
	bt	29, L(g4)
/* Process the even word of doubleword.  */
	bf+	31, L(g2)
	stb	rCHR, 0(rMEMP0)
	bt	30, L(g4x)
L(g2):
	sth	rCHR, -6(rMEMP)
L(g4x):
	stw	rCHR, -4(rMEMP)
	b	L(aligned)
/* Process the odd word of doubleword.  */
L(g4):
	bf	28, L(g4x) /* If false, word aligned on odd word.  */
	bf+	31, L(g0)
	stb	rCHR, 0(rMEMP0)
	bt	30, L(aligned)
L(g0):
	sth	rCHR, -2(rMEMP)

/* Handle the case of size <= 31.  */
L(aligned2):
	rlwimi	rCHR, rCHR, 16, 0, 15 /* Replicate halfword to word.  */
L(aligned):
	mtcrf	0x01, rLEN
	ble	cr5, L(medium)
/* Align to 32-byte boundary.  */
	andi.	rALIGN, rMEMP, 0x18
	subfic	rALIGN, rALIGN, 0x20
	insrdi	rCHR,rCHR,32,0 /* Replicate word to double word. */
	beq	L(caligned)
	mtcrf	0x01, rALIGN
	add	rMEMP, rMEMP, rALIGN
	sub	rLEN, rLEN, rALIGN
	cmplwi	cr1, rALIGN, 0x10
	mr	rMEMP2, rMEMP
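/* Store 8, 16, or 24 bytes, working backward from the new rMEMP:
   CR7 bit 28 (the 8s bit of rALIGN) selects a single doubleword and
   cr1 (rALIGN >= 16) selects an additional pair of doublewords.  */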
	bf	28, L(a1)
	stdu	rCHR, -8(rMEMP2)
L(a1):	blt	cr1, L(a2)
	std	rCHR, -8(rMEMP2)
	stdu	rCHR, -16(rMEMP2)
L(a2):

/* Now aligned to a 32 byte boundary.  */
L(caligned):
	cmpldi	cr1, rCHR, 0
	clrrdi.	rALIGN, rLEN, 5
	mtcrf	0x01, rLEN
	beq	cr1, L(zloopstart) /* Special case for clearing memory using dcbz.  */
L(nondcbz):
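/* Fill four doublewords (32 bytes) per iteration, working backward from
   the end of the 32-byte-multiple region; rLEN is reduced to the 0-31
   byte remainder that L(medium) finishes off.  */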
	srdi	rTMP, rALIGN, 5
	mtctr	rTMP
	beq	L(medium)	/* We may not actually get to do a full line.  */
	clrldi.	rLEN, rLEN, 59
	add	rMEMP, rMEMP, rALIGN
	li	rNEG64, -0x40
	bdz	L(cloopdone)

L(c3):	dcbtst	rNEG64, rMEMP
	std	rCHR, -8(rMEMP)
	std	rCHR, -16(rMEMP)
	std	rCHR, -24(rMEMP)
	stdu	rCHR, -32(rMEMP)
	bdnz	L(c3)
L(cloopdone):
	std	rCHR, -8(rMEMP)
	std	rCHR, -16(rMEMP)
	cmpldi	cr1, rLEN, 16
	std	rCHR, -24(rMEMP)
	stdu	rCHR, -32(rMEMP)
	beqlr
	add	rMEMP, rMEMP, rALIGN
	b	L(medium_tail2)

	.align 5
/* Clear whole cache lines (typically 128 bytes) at a time using dcbz.  */
L(zloopstart):
/* If the remaining length is less than 32 bytes, don't bother getting
   the cache line size.  */
	beq	L(medium)
	ld	rCLS,.LC0@toc(r2)
	lwz	rCLS,0(rCLS)
/* If the cache line size was not set, just go to L(nondcbz), which is
   safe for any cache line size.  */
	cmpldi	cr1,rCLS,0
	beq		cr1,L(nondcbz)


/* Now we know the cache line size, but we may not yet be aligned to
   the cache line and may have a partial line to fill, so touch it
   first.  */
	dcbt	0,rMEMP
	addi	rCLM,rCLS,-1
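/* rCLM is the cache-line mask; store 32 bytes at a time until rMEMP is
   cache-line aligned (rMEMP & rCLM == 0) or fewer than 32 bytes
   remain.  */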
L(getCacheAligned):
	cmpldi	cr1,rLEN,32
	and.	rTMP,rCLM,rMEMP
	blt		cr1,L(handletail32)
	beq		L(cacheAligned)
	addi	rMEMP,rMEMP,32
	addi	rLEN,rLEN,-32
	std		rCHR,-32(rMEMP)
	std		rCHR,-24(rMEMP)
	std		rCHR,-16(rMEMP)
	std		rCHR,-8(rMEMP)
	b		L(getCacheAligned)

/* Now we are aligned to the cache line and can use dcbz.  */
L(cacheAligned):
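/* Each dcbz zeroes the entire cache line (rCLS bytes) addressed by
   rMEMP in a single instruction.  */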
	cmpld	cr1,rLEN,rCLS
	blt		cr1,L(handletail32)
	dcbz	0,rMEMP
	subf	rLEN,rCLS,rLEN
	add		rMEMP,rMEMP,rCLS
	b		L(cacheAligned)

/* We are here because the cache line size was set and the remainder
   (rLEN) is less than the actual cache line size.  So set up the
   preconditions for L(nondcbz) and go there.  */
L(handletail32):
	clrrwi.	rALIGN, rLEN, 5
	b		L(nondcbz)

	.align 5
L(small):
/* Memset of 8 bytes or less.  */
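/* If more than 4 bytes remain, store 4 bytes first; the remaining 0-4
   bytes are then handled by the cr5/cr1 tests below.  */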
	cmpldi	cr6, rLEN, 4
	cmpldi	cr5, rLEN, 1
	ble	cr6,L(le4)
	subi	rLEN, rLEN, 4
	stb	rCHR,0(rMEMP)
	stb	rCHR,1(rMEMP)
	stb	rCHR,2(rMEMP)
	stb	rCHR,3(rMEMP)
	addi	rMEMP,rMEMP, 4
	cmpldi	cr5, rLEN, 1
L(le4):
	cmpldi	cr1, rLEN, 3
	bltlr	cr5
	stb	rCHR, 0(rMEMP)
	beqlr	cr5
	stb	rCHR, 1(rMEMP)
	bltlr	cr1
	stb	rCHR, 2(rMEMP)
	beqlr	cr1
	stb	rCHR, 3(rMEMP)
	blr

/* Memset of 0-31 bytes.  */
	.align 5
L(medium):
	insrdi	rCHR,rCHR,32,0 /* Replicate word to double word.  */
	cmpldi	cr1, rLEN, 16
L(medium_tail2):
	add	rMEMP, rMEMP, rLEN
L(medium_tail):
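/* rMEMP now points just past the end of the region.  CR7 holds the low
   bits of rLEN (set by the earlier mtcrf): bit 31 selects a 1-byte
   store, bit 30 a 2-byte store, bit 29 a 4-byte store, bit 28 an 8-byte
   store, and cr1 (rLEN >= 16) selects an extra 16 bytes, all working
   backward from the end.  */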
	bt-	31, L(medium_31t)
	bt-	30, L(medium_30t)
L(medium_30f):
	bt-	29, L(medium_29t)
L(medium_29f):
	bge-	cr1, L(medium_27t)
	bflr-	28
	std	rCHR, -8(rMEMP)
	blr

L(medium_31t):
	stbu	rCHR, -1(rMEMP)
	bf-	30, L(medium_30f)
L(medium_30t):
	sthu	rCHR, -2(rMEMP)
	bf-	29, L(medium_29f)
L(medium_29t):
	stwu	rCHR, -4(rMEMP)
	blt-	cr1, L(medium_27f)
L(medium_27t):
	std	rCHR, -8(rMEMP)
	stdu	rCHR, -16(rMEMP)
L(medium_27f):
	bflr-	28
L(medium_28t):
	std	rCHR, -8(rMEMP)
	blr
END_GEN_TB (BP_SYM (memset),TB_TOCLESS)
libc_hidden_builtin_def (memset)

/* Copied from bzero.S to prevent the linker from inserting a stub
   between bzero and memset.  */
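/* __bzero (s [r3], n [r4]): move the length into memset's length
   register and zero the fill value, then fall into L(_memset).  */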
ENTRY (BP_SYM (__bzero))
	CALL_MCOUNT 3
#if __BOUNDED_POINTERS__
	mr	r6,r4
	li	r5,0
	mr	r4,r3
	/* Tell memset that we don't want a return value.  */
	li	r3,0
	b	L(_memset)
#else
	mr	r5,r4
	li	r4,0
	b	L(_memset)
#endif
END_GEN_TB (BP_SYM (__bzero),TB_TOCLESS)

weak_alias (BP_SYM (__bzero), BP_SYM (bzero))