/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/12.0/sys/i386/i386/support.s 334520 2018-06-02 04:25:09Z bde $
 */

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.inc"

#define	IDXSHIFT	10

	.text

/*
 * bcopy family
 * void bzero(void *buf, u_int len)
 */
ENTRY(bzero)
	pushl	%edi
	movl	8(%esp),%edi
	movl	12(%esp),%ecx
	xorl	%eax,%eax
	shrl	$2,%ecx
	rep
	stosl
	movl	12(%esp),%ecx
	andl	$3,%ecx
	rep
	stosb
	popl	%edi
	ret
END(bzero)
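
/*
 * Rough C equivalent of the routine above, for illustration only (the
 * function is really the assembly; this sketch just restates the
 * word-then-byte split):
 *
 *	void
 *	bzero(void *buf, u_int len)
 *	{
 *		uint32_t *wp = buf;
 *		uint8_t *bp;
 *		u_int n;
 *
 *		for (n = len >> 2; n != 0; n--)		// rep stosl
 *			*wp++ = 0;
 *		bp = (uint8_t *)wp;
 *		for (n = len & 3; n != 0; n--)		// rep stosb
 *			*bp++ = 0;
 *	}
 */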

ENTRY(sse2_pagezero)
	pushl	%ebx
	movl	8(%esp),%ecx
	movl	%ecx,%eax
	addl	$4096,%eax
	xorl	%ebx,%ebx
	jmp	1f
	/*
	 * The loop takes 14 bytes.  Ensure that it doesn't cross a 16-byte
	 * cache line.
	 */
	.p2align 4,0x90
1:
	movnti	%ebx,(%ecx)
	movnti	%ebx,4(%ecx)
	addl	$8,%ecx
	cmpl	%ecx,%eax
	jne	1b
	sfence
	popl	%ebx
	ret
END(sse2_pagezero)
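
/*
 * In rough C terms (illustrative only; movnti has no portable C
 * equivalent, since it is a non-temporal store that bypasses the
 * caches):
 *
 *	void
 *	sse2_pagezero(void *pg)
 *	{
 *		uint32_t *p = pg;
 *		uint32_t *end = (uint32_t *)((char *)pg + 4096);
 *
 *		while (p != end) {
 *			*p++ = 0;	// movnti
 *			*p++ = 0;	// movnti
 *		}
 *		// sfence: order the non-temporal stores before later ones
 *	}
 */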

ENTRY(i686_pagezero)
	pushl	%edi
	pushl	%ebx

	movl	12(%esp),%edi
	movl	$1024,%ecx

	ALIGN_TEXT
1:
	xorl	%eax,%eax
	repe
	scasl
	jnz	2f

	popl	%ebx
	popl	%edi
	ret

	ALIGN_TEXT

2:
	incl	%ecx
	subl	$4,%edi

	movl	%ecx,%edx
	cmpl	$16,%ecx

	jge	3f

	movl	%edi,%ebx
	andl	$0x3f,%ebx
	shrl	%ebx
	shrl	%ebx
	movl	$16,%ecx
	subl	%ebx,%ecx

3:
	subl	%ecx,%edx
	rep
	stosl

	movl	%edx,%ecx
	testl	%edx,%edx
	jnz	1b

	popl	%ebx
	popl	%edi
	ret
END(i686_pagezero)
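
/*
 * Hedged C sketch of the idea above (illustrative): scan for the first
 * nonzero word (repe scasl) so that already-zero memory is never
 * dirtied; once a nonzero word is found, zero the rest in bulk, or,
 * when fewer than 16 words remain, just to the end of the current
 * 64-byte line before rescanning:
 *
 *	void
 *	i686_pagezero(void *pg)
 *	{
 *		uint32_t *p = pg;
 *		u_int left = 1024, run;
 *
 *		while (left != 0) {
 *			while (left != 0 && *p == 0) {	// repe scasl
 *				p++;
 *				left--;
 *			}
 *			if (left == 0)
 *				return;
 *			if (left >= 16)
 *				run = left;
 *			else
 *				run = 16 - (((uintptr_t)p & 0x3f) >> 2);
 *			left -= run;
 *			while (run-- != 0)		// rep stosl
 *				*p++ = 0;
 *		}
 *	}
 */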

/* fillw(pat, base, cnt) */
ENTRY(fillw)
	pushl	%edi
	movl	8(%esp),%eax
	movl	12(%esp),%edi
	movl	16(%esp),%ecx
	rep
	stosw
	popl	%edi
	ret
END(fillw)
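
/*
 * C view of the interface above (illustrative): the low 16 bits of
 * `pat' are stored into `cnt' consecutive 16-bit words starting at
 * `base', roughly:
 *
 *	uint16_t *p = base;
 *
 *	while (cnt-- != 0)	// rep stosw
 *		*p++ = (uint16_t)pat;
 */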

/*
 * memmove(dst, src, cnt) (return dst)
 * bcopy(src, dst, cnt)
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
ENTRY(bcopy)
	movl	4(%esp),%eax
	movl	8(%esp),%edx
	movl	%eax,8(%esp)
	movl	%edx,4(%esp)
	MEXITCOUNT
	jmp	memmove
END(bcopy)

ENTRY(memmove)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%esi
	pushl	%edi
	movl	8(%ebp),%edi
	movl	12(%ebp),%esi
1:
	movl	16(%ebp),%ecx

	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax			/* overlapping && src < dst? */
	jb	1f

	shrl	$2,%ecx				/* copy by 32-bit words */
	rep
	movsl
	movl	16(%ebp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%edi
	popl	%esi
	movl	8(%ebp),%eax			/* return dst for memmove */
	popl	%ebp
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards */
	addl	%ecx,%esi
	decl	%edi
	decl	%esi
	andl	$3,%ecx				/* any fractional bytes? */
	std
	rep
	movsb
	movl	16(%ebp),%ecx			/* copy remainder by 32-bit words */
	shrl	$2,%ecx
	subl	$3,%esi
	subl	$3,%edi
	rep
	movsl
	popl	%edi
	popl	%esi
	cld
	movl	8(%ebp),%eax			/* return dst for memmove */
	popl	%ebp
	ret
END(memmove)
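
/*
 * Hedged C sketch of the overlap test above: copying forward is unsafe
 * only when the destination starts inside the source region, which a
 * single unsigned comparison catches:
 *
 *	if ((uintptr_t)dst - (uintptr_t)src >= cnt) {
 *		// forward: words, then the 0-3 trailing bytes
 *	} else {
 *		// backward (std): the 0-3 trailing bytes, then words
 *	}
 */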

/*
 * Note: memcpy does not support overlapping copies
 */
ENTRY(memcpy)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi
	movl	16(%esp),%esi
	movl	20(%esp),%ecx
	movl	%edi,%eax
	shrl	$2,%ecx				/* copy by 32-bit words */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%esi
	popl	%edi
	ret
END(memcpy)
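
/*
 * Caveat, for illustration: unlike memmove() above there is no overlap
 * check here, so something like
 *
 *	memcpy(p + 1, p, n);	// overlapping; forward copy smears
 *
 * has undefined contents and must use memmove()/bcopy() instead.
 */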

/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 */
ENTRY(copystr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */
	incl	%edx
1:
	decl	%edx
	jz	4f
	lodsb
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- terminating NUL byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	6f
4:
	/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax

6:
	/* set *lencopied and return %eax */
	movl	20(%esp),%ecx
	subl	%edx,%ecx
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	7f
	movl	%ecx,(%edx)
7:
	popl	%edi
	popl	%esi
	ret
END(copystr)
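
/*
 * Rough C equivalent of the routine above (illustrative; the count
 * stored through `lencopied' includes the terminating NUL):
 *
 *	int
 *	copystr(const void *from, void *to, size_t maxlen, size_t *lencopied)
 *	{
 *		const char *s = from;
 *		char *d = to;
 *		size_t n;
 *
 *		for (n = maxlen; n != 0; n--) {
 *			if ((*d++ = *s++) == '\0') {
 *				if (lencopied != NULL)
 *					*lencopied = maxlen - n + 1;
 *				return (0);
 *			}
 *		}
 *		if (lencopied != NULL)
 *			*lencopied = maxlen;
 *		return (ENAMETOOLONG);
 *	}
 */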

ENTRY(bcmp)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi
	movl	16(%esp),%esi
	movl	20(%esp),%edx

	movl	%edx,%ecx
	shrl	$2,%ecx
	repe
	cmpsl
	jne	1f

	movl	%edx,%ecx
	andl	$3,%ecx
	repe
	cmpsb
1:
	setne	%al
	movsbl	%al,%eax
	popl	%esi
	popl	%edi
	ret
END(bcmp)
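
/*
 * Behaviourally (illustrative) this is
 *
 *	int
 *	bcmp(const void *b1, const void *b2, size_t len)
 *	{
 *		return (memcmp(b1, b2, len) != 0);
 *	}
 *
 * i.e. only equal/not-equal is reported, never an ordering.
 */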

/*
 * Handling of special 386 registers and descriptor tables etc
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	movl	4(%esp),%eax
	lgdt	(%eax)

	/* flush the prefetch queue */
	jmp	1f
	nop
1:
	/* reload "stale" selectors */
	movl	$KDSEL,%eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%gs
	movl	%eax,%ss
	movl	$KPSEL,%eax
	movl	%eax,%fs

	/* reload code selector by turning the return into a far (inter-segment) return */
	movl	(%esp),%eax
	pushl	%eax
	movl	$KCSEL,4(%esp)
	MEXITCOUNT
	lret
END(lgdt)
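
/*
 * Sketch of the far-return trick above: %cs cannot be written with a
 * mov, so the frame is rewritten before `lret':
 *
 *	stack on entry (top first):	[ return EIP ]
 *	after the rewrite:		[ return EIP ][ KCSEL ]
 *
 * `lret' then pops the new %eip and %cs in one go, reloading the code
 * selector from the freshly loaded GDT.
 */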

/* ssdtosd(*ssdp,*sdp) */
ENTRY(ssdtosd)
	pushl	%ebx
	movl	8(%esp),%ecx
	movl	8(%ecx),%ebx
	shll	$16,%ebx
	movl	(%ecx),%edx
	roll	$16,%edx
	movb	%dh,%bl
	movb	%dl,%bh
	rorl	$8,%ebx
	movl	4(%ecx),%eax
	movw	%ax,%dx
	andl	$0xf0000,%eax
	orl	%eax,%ebx
	movl	12(%esp),%ecx
	movl	%edx,(%ecx)
	movl	%ebx,4(%ecx)
	popl	%ebx
	ret
END(ssdtosd)
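
/*
 * What ssdtosd() does (sketch): it packs a machine-independent
 * soft_segment_descriptor into the split-field hardware descriptor
 * layout, where limit[15:0] and base[15:0] occupy the first 32-bit
 * word, and base[23:16], the access/flag bits, limit[19:16] and
 * base[31:24] are interleaved in the second.
 */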

/* void reset_dbregs() */
ENTRY(reset_dbregs)
	movl	$0,%eax
	movl	%eax,%dr7	/* disable all breakpoints first */
	movl	%eax,%dr0
	movl	%eax,%dr1
	movl	%eax,%dr2
	movl	%eax,%dr3
	movl	%eax,%dr6
	ret
END(reset_dbregs)

/*****************************************************************************/
/* setjmp, longjmp                                                           */
/*****************************************************************************/

ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx,(%eax)			/* save ebx */
	movl	%esp,4(%eax)			/* save esp */
	movl	%ebp,8(%eax)			/* save ebp */
	movl	%esi,12(%eax)			/* save esi */
	movl	%edi,16(%eax)			/* save edi */
	movl	(%esp),%edx			/* get rta */
	movl	%edx,20(%eax)			/* save eip */
	xorl	%eax,%eax			/* return(0); */
	ret
END(setjmp)

ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	(%eax),%ebx			/* restore ebx */
	movl	4(%eax),%esp			/* restore esp */
	movl	8(%eax),%ebp			/* restore ebp */
	movl	12(%eax),%esi			/* restore esi */
	movl	16(%eax),%edi			/* restore edi */
	movl	20(%eax),%edx			/* get rta */
	movl	%edx,(%esp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
END(longjmp)
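
/*
 * Usage sketch (illustrative): the jmp_buf saved above holds
 * { ebx, esp, ebp, esi, edi, eip }, and longjmp() always makes the
 * original setjmp() call site return 1:
 *
 *	static jmp_buf jb;
 *
 *	if (setjmp(jb) == 0) {
 *		// normal path; a later longjmp(jb) ...
 *	} else {
 *		// ... resumes here, with setjmp() returning 1
 *	}
 */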

/*
 * Support for reading MSRs in a safe manner: instead of panicking on
 * #GP, return an error.
 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movl	PCPU(CURPCB),%ecx
	movl	$msr_onfault,PCB_ONFAULT(%ecx)

	movl	4(%esp),%ecx
	rdmsr
	movl	8(%esp),%ecx
	movl	%eax,(%ecx)
	movl	%edx,4(%ecx)
	xorl	%eax,%eax

	movl	PCPU(CURPCB),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)

	ret
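
/*
 * Illustrative use: probe an MSR that may not exist without taking the
 * machine down on the resulting #GP (the fault lands in msr_onfault
 * below):
 *
 *	uint64_t val;
 *
 *	if (rdmsr_safe(msr, &val) != 0) {
 *		// the rdmsr faulted; EFAULT was returned
 *	}
 */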

/*
 * Support for writing MSRs in a safe manner: instead of panicking on
 * #GP, return an error.
 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
	movl	PCPU(CURPCB),%ecx
	movl	$msr_onfault,PCB_ONFAULT(%ecx)

	movl	4(%esp),%ecx
	movl	8(%esp),%eax
	movl	12(%esp),%edx
	wrmsr
	xorl	%eax,%eax

	movl	PCPU(CURPCB),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)

	ret

/*
 * MSR operations fault handler
 */
	ALIGN_TEXT
msr_onfault:
	movl	PCPU(CURPCB),%ecx
	movl	$0,PCB_ONFAULT(%ecx)
	movl	$EFAULT,%eax
	ret
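
/*
 * How the recovery works (sketch): on a kernel-mode fault the trap
 * handler consults curpcb->pcb_onfault and, if it is set, resumes
 * execution there instead of panicking.  Both MSR routines above point
 * it at msr_onfault, which clears the hook and returns EFAULT to the
 * original caller.
 */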

ENTRY(handle_ibrs_entry)
	cmpb	$0,hw_ibrs_active
	je	1f
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	rdmsr
	orl	$(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
	orl	$(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32,%edx
	wrmsr
	movb	$1,PCPU(IBPB_SET)
	/*
	 * i386 does not implement SMEP, but the 4/4 split makes this not
	 * that important.
	 */
1:	ret
END(handle_ibrs_entry)

ENTRY(handle_ibrs_exit)
	cmpb	$0,PCPU(IBPB_SET)
	je	1f
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	rdmsr
	andl	$~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
	andl	$~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
	wrmsr
	movb	$0,PCPU(IBPB_SET)
1:	ret
END(handle_ibrs_exit)
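
/*
 * In outline (illustrative): when the hw_ibrs_active tunable is set,
 * kernel entry turns on IBRS and STIBP in IA32_SPEC_CTRL and records
 * that per-CPU in IBPB_SET; return to user mode clears the same bits,
 * roughly:
 *
 *	entry:	if (hw_ibrs_active) {
 *			spec_ctrl |= IBRS | STIBP;	// wrmsr
 *			PCPU_SET(ibpb_set, 1);
 *		}
 *	exit:	if (PCPU_GET(ibpb_set)) {
 *			spec_ctrl &= ~(IBRS | STIBP);	// wrmsr
 *			PCPU_SET(ibpb_set, 0);
 *		}
 */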