1 /*-
2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1993 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 4. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * $FreeBSD: releng/8.2/sys/amd64/amd64/support.S 212005 2010-08-30 18:23:48Z delphij $
31 */
32
33 #include "opt_ddb.h"
34
35 #include <machine/asmacros.h>
36 #include <machine/intr_machdep.h>
37 #include <machine/pmap.h>
38
39 #include "assym.s"
40
	.data
	ALIGN_DATA
/*
 * Interrupt counters: INTRCNT_COUNT 64-bit counters, bracketed by the
 * intrcnt/eintrcnt labels so MI code can size the array.
 */
	.globl	intrcnt, eintrcnt
intrcnt:
	.space	INTRCNT_COUNT * 8
eintrcnt:

/*
 * Parallel table of interrupt source names, one (MAXCOMLEN + 1)-byte
 * slot per counter above, bracketed by intrnames/eintrnames.
 */
	.globl	intrnames, eintrnames
intrnames:
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1)
eintrnames:

	.text
54
/*
 * bcopy family
 * void bzero(void *buf, size_t len)	(the full 64-bit count in %rsi is used)
 */
59
/*
 * bzero(buf = %rdi, len = %rsi)
 * Zero a buffer: 8 bytes at a time, then the 0-7 byte tail.
 */
ENTRY(bzero)
	movq	%rsi,%rcx		/* %rcx = byte count */
	xorl	%eax,%eax		/* fill value = 0 */
	shrq	$3,%rcx			/* number of 8-byte words */
	cld
	rep
	stosq
	movq	%rsi,%rcx
	andq	$7,%rcx			/* remaining 0-7 bytes */
	rep
	stosb
	ret
END(bzero)
74
/*
 * pagezero(addr = %rdi)
 * Zero one page with non-temporal stores so the zeroed page does not
 * displace useful data from the caches.
 */
ENTRY(pagezero)
	movq	$-PAGE_SIZE,%rdx	/* %rdx runs from -PAGE_SIZE up to 0 */
	subq	%rdx,%rdi		/* bias %rdi so (%rdi,%rdx) spans the page */
	xorl	%eax,%eax
1:
	movnti	%rax,(%rdi,%rdx)	/* 32 bytes per iteration */
	movnti	%rax,8(%rdi,%rdx)
	movnti	%rax,16(%rdi,%rdx)
	movnti	%rax,24(%rdi,%rdx)
	addq	$32,%rdx
	jne	1b			/* loop until %rdx reaches 0 */
	sfence				/* order movnti stores before returning */
	ret
END(pagezero)
90
/*
 * bcmp(b1 = %rdi, b2 = %rsi, len = %rdx)
 * Returns 0 if the buffers are equal, non-zero otherwise.
 */
ENTRY(bcmp)
	movq	%rdx,%rcx
	shrq	$3,%rcx			/* compare 8-byte words first */
	cld				/* compare forwards */
	repe
	cmpsq
	jne	1f			/* mismatch in a word: report non-zero */

	movq	%rdx,%rcx
	andq	$7,%rcx			/* then the remaining 0-7 bytes */
	repe
	cmpsb
1:
	setne	%al			/* %al = 1 on mismatch, 0 on equality */
	movsbl	%al,%eax
	ret
END(bcmp)
108
/*
 * bcopy(src, dst, cnt)
 *       rdi, rsi, rdx
 * Handles overlapping regions by copying backwards when src < dst
 * and the regions overlap.
 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
ENTRY(bcopy)
	xchgq	%rsi,%rdi		/* %rsi = src, %rdi = dst (movs convention) */
	movq	%rdx,%rcx

	movq	%rdi,%rax
	subq	%rsi,%rax		/* %rax = dst - src */
	cmpq	%rcx,%rax		/* overlapping && src < dst? */
	jb	1f

	shrq	$3,%rcx			/* copy by 64-bit words */
	cld				/* nope, copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx			/* any bytes left? */
	rep
	movsb
	ret

	/* ALIGN_TEXT */
1:
	addq	%rcx,%rdi		/* copy backwards */
	addq	%rcx,%rsi
	decq	%rdi			/* start at the last byte of each region */
	decq	%rsi
	andq	$7,%rcx			/* any fractional bytes? */
	std				/* string ops go downwards */
	rep
	movsb
	movq	%rdx,%rcx		/* copy remainder by 64-bit words */
	shrq	$3,%rcx
	subq	$7,%rsi			/* step back to the start of the last word */
	subq	$7,%rdi
	rep
	movsq
	cld				/* restore the expected direction flag */
	ret
END(bcopy)
152
/*
 * memcpy(dst = %rdi, src = %rsi, cnt = %rdx)
 * Note: memcpy does not support overlapping copies.
 */
ENTRY(memcpy)
	movq	%rdx,%rcx
	shrq	$3,%rcx			/* copy by 64-bit words */
	cld				/* copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx			/* any bytes left? */
	rep
	movsb
	ret
END(memcpy)
168
/*
 * pagecopy(%rdi=from, %rsi=to)
 * Copy one page, prefetching the source first and writing with
 * non-temporal stores to avoid polluting the caches.
 */
ENTRY(pagecopy)
	movq	$-PAGE_SIZE,%rax	/* negative index runs up to 0 */
	movq	%rax,%rdx
	subq	%rax,%rdi		/* bias pointers so (ptr,idx) spans the page */
	subq	%rax,%rsi
1:
	prefetchnta (%rdi,%rax)		/* prefetch source, one line (64 B) per step */
	addq	$64,%rax
	jne	1b
2:
	movq	(%rdi,%rdx),%rax	/* 32 bytes per iteration */
	movnti	%rax,(%rsi,%rdx)
	movq	8(%rdi,%rdx),%rax
	movnti	%rax,8(%rsi,%rdx)
	movq	16(%rdi,%rdx),%rax
	movnti	%rax,16(%rsi,%rdx)
	movq	24(%rdi,%rdx),%rax
	movnti	%rax,24(%rsi,%rdx)
	addq	$32,%rdx
	jne	2b
	sfence				/* order movnti stores before returning */
	ret
END(pagecopy)
195
/*
 * fillw(pat, base, cnt)
 *       %rdi, %rsi, %rdx
 * Fill cnt 16-bit words at base with pattern pat.
 */
ENTRY(fillw)
	movq	%rdi,%rax		/* %ax = fill pattern for stosw */
	movq	%rsi,%rdi		/* %rdi = destination */
	movq	%rdx,%rcx		/* %rcx = word count */
	cld
	rep
	stosw
	ret
END(fillw)
207
208 /*****************************************************************************/
209 /* copyout and fubyte family */
210 /*****************************************************************************/
211 /*
212 * Access user memory from inside the kernel. These routines should be
213 * the only places that do this.
214 *
215 * These routines set curpcb->onfault for the time they execute. When a
216 * protection violation occurs inside the functions, the trap handler
217 * returns to *curpcb->onfault instead of the function.
218 */
219
/*
 * copyout(from_kernel, to_user, len) - MP SAFE
 *         %rdi,        %rsi,    %rdx
 * Returns 0 on success, EFAULT on a fault or out-of-range user address.
 * Faults taken while copying vector through PCB_ONFAULT to copyout_fault.
 */
ENTRY(copyout)
	movq	PCPU(CURPCB),%rax
	movq	$copyout_fault,PCB_ONFAULT(%rax)	/* arm the fault handler */
	testq	%rdx,%rdx		/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movq	%rsi,%rax
	addq	%rdx,%rax		/* %rax = end of user destination */
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	movq	$VM_MAXUSER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyout_fault

	xchgq	%rdi,%rsi
	/* bcopy(%rsi, %rdi, %rdx) */
	movq	%rdx,%rcx

	shrq	$3,%rcx			/* copy 8-byte words... */
	cld
	rep
	movsq
	movb	%dl,%cl
	andb	$7,%cl			/* ...then the remaining 0-7 bytes */
	rep
	movsb

done_copyout:
	xorl	%eax,%eax		/* success: return 0 */
	movq	PCPU(CURPCB),%rdx
	movq	%rax,PCB_ONFAULT(%rdx)	/* disarm the fault handler */
	ret

	ALIGN_TEXT
copyout_fault:
	movq	PCPU(CURPCB),%rdx
	movq	$0,PCB_ONFAULT(%rdx)	/* disarm the fault handler */
	movq	$EFAULT,%rax
	ret
END(copyout)
279
/*
 * copyin(from_user, to_kernel, len) - MP SAFE
 *        %rdi,      %rsi,      %rdx
 * Returns 0 on success, EFAULT on a fault or out-of-range user address.
 * Faults taken while copying vector through PCB_ONFAULT to copyin_fault.
 */
ENTRY(copyin)
	movq	PCPU(CURPCB),%rax
	movq	$copyin_fault,PCB_ONFAULT(%rax)	/* arm the fault handler */
	testq	%rdx,%rdx		/* anything to do? */
	jz	done_copyin

	/*
	 * make sure address is valid
	 */
	movq	%rdi,%rax
	addq	%rdx,%rax		/* %rax = end of user source; reject wrap */
	jc	copyin_fault
	movq	$VM_MAXUSER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyin_fault

	xchgq	%rdi,%rsi
	movq	%rdx,%rcx
	movb	%cl,%al			/* stash low bits of the count in %al */
	shrq	$3,%rcx			/* copy longword-wise */
	cld
	rep
	movsq
	movb	%al,%cl
	andb	$7,%cl			/* copy remaining bytes */
	rep
	movsb

done_copyin:
	xorl	%eax,%eax		/* success: return 0 */
	movq	PCPU(CURPCB),%rdx
	movq	%rax,PCB_ONFAULT(%rdx)	/* disarm the fault handler */
	ret

	ALIGN_TEXT
copyin_fault:
	movq	PCPU(CURPCB),%rdx
	movq	$0,PCB_ONFAULT(%rdx)	/* disarm the fault handler */
	movq	$EFAULT,%rax
	ret
END(copyin)
325
/*
 * casuword32.  Compare and set user integer.  Returns -1 or the current value.
 * dst = %rdi, old = %rsi, new = %rdx
 * A fault vectors to fusufault, which returns -1.
 */
ENTRY(casuword32)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm the fault handler */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* 4-byte access must fit below limit */
	cmpq	%rax,%rdi		/* verify address is valid */
	ja	fusufault

	movl	%esi,%eax		/* old */
#ifdef SMP
	lock
#endif
	cmpxchgl %edx,(%rdi)		/* new = %edx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(CURPCB),%rcx
	movq	$0,PCB_ONFAULT(%rcx)	/* disarm the fault handler */
	ret
END(casuword32)
354
/*
 * casuword.  Compare and set user word.  Returns -1 or the current value.
 * dst = %rdi, old = %rsi, new = %rdx
 * A fault vectors to fusufault, which returns -1.
 */
ENTRY(casuword)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm the fault handler */

	/*
	 * cmpxchgq is an 8-byte access, so the highest valid address is
	 * VM_MAXUSER_ADDRESS-8 (the old -4 bound allowed the access to
	 * extend past the end of user space).
	 */
	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi		/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax		/* old */
#ifdef SMP
	lock
#endif
	cmpxchgq %rdx,(%rdi)		/* new = %rdx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(CURPCB),%rcx
	movq	$0,PCB_ONFAULT(%rcx)	/* disarm the fault handler */
	ret
END(casuword)
384
/*
 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
 * byte from user memory.  All these functions are MPSAFE.
 * addr = %rdi
 * Returns the value read, or -1 (via fusufault) on a bad address or fault.
 */

ALTENTRY(fuword64)
ENTRY(fuword)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm the fault handler */

	movq	$VM_MAXUSER_ADDRESS-8,%rax	/* 8-byte access must fit below limit */
	cmpq	%rax,%rdi		/* verify address is valid */
	ja	fusufault

	movq	(%rdi),%rax
	movq	$0,PCB_ONFAULT(%rcx)	/* disarm the fault handler */
	ret
END(fuword64)
END(fuword)
405
/* Fetch a 32-bit word from user memory; returns value or -1 on fault. */
ENTRY(fuword32)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm the fault handler */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* 4-byte access must fit below limit */
	cmpq	%rax,%rdi		/* verify address is valid */
	ja	fusufault

	movl	(%rdi),%eax		/* zero-extends into %rax */
	movq	$0,PCB_ONFAULT(%rcx)	/* disarm the fault handler */
	ret
END(fuword32)
418
/*
 * fuswintr() and suswintr() are specialized variants of fuword16() and
 * suword16(), respectively.  They are called from the profiling code,
 * potentially at interrupt time.  If they fail, that's okay; good things
 * will happen later.  They always fail for now, until the trap code is
 * able to deal with this.
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movq	$-1,%rax		/* unconditionally report failure */
	ret
END(suswintr)
END(fuswintr)
432
/* Fetch a 16-bit word from user memory; returns value or -1 on fault. */
ENTRY(fuword16)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm the fault handler */

	movq	$VM_MAXUSER_ADDRESS-2,%rax	/* 2-byte access must fit below limit */
	cmpq	%rax,%rdi
	ja	fusufault

	movzwl	(%rdi),%eax		/* zero-extend the 16-bit value */
	movq	$0,PCB_ONFAULT(%rcx)	/* disarm the fault handler */
	ret
END(fuword16)
445
/* Fetch a byte from user memory; returns value or -1 on fault. */
ENTRY(fubyte)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm the fault handler */

	movq	$VM_MAXUSER_ADDRESS-1,%rax	/* 1-byte access must fit below limit */
	cmpq	%rax,%rdi
	ja	fusufault

	movzbl	(%rdi),%eax		/* zero-extend the byte */
	movq	$0,PCB_ONFAULT(%rcx)	/* disarm the fault handler */
	ret
END(fubyte)
458
/*
 * Common fault handler for the fu*/su*/casu* routines: disarm
 * PCB_ONFAULT and return -1.
 */
	ALIGN_TEXT
fusufault:
	movq	PCPU(CURPCB),%rcx
	xorl	%eax,%eax
	movq	%rax,PCB_ONFAULT(%rcx)	/* disarm the fault handler */
	decq	%rax			/* return -1 */
	ret
466
/*
 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
 * user memory.  All these functions are MPSAFE.
 * addr = %rdi, value = %rsi
 * Returns 0 on success, -1 (via fusufault) on a bad address or fault.
 */
ALTENTRY(suword64)
ENTRY(suword)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm the fault handler */

	movq	$VM_MAXUSER_ADDRESS-8,%rax	/* 8-byte access must fit below limit */
	cmpq	%rax,%rdi		/* verify address validity */
	ja	fusufault

	movq	%rsi,(%rdi)
	xorl	%eax,%eax		/* success: return 0 */
	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)	/* disarm the fault handler */
	ret
END(suword64)
END(suword)
488
/* Store a 32-bit word to user memory; returns 0 or -1 on fault. */
ENTRY(suword32)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm the fault handler */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* 4-byte access must fit below limit */
	cmpq	%rax,%rdi		/* verify address validity */
	ja	fusufault

	movl	%esi,(%rdi)
	xorl	%eax,%eax		/* success: return 0 */
	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)	/* disarm the fault handler */
	ret
END(suword32)
503
/* Store a 16-bit word to user memory; returns 0 or -1 on fault. */
ENTRY(suword16)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm the fault handler */

	movq	$VM_MAXUSER_ADDRESS-2,%rax	/* 2-byte access must fit below limit */
	cmpq	%rax,%rdi		/* verify address validity */
	ja	fusufault

	movw	%si,(%rdi)
	xorl	%eax,%eax		/* success: return 0 */
	movq	PCPU(CURPCB),%rcx	/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)	/* disarm the fault handler */
	ret
END(suword16)
518
/* Store a byte to user memory; returns 0 or -1 on fault. */
ENTRY(subyte)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm the fault handler */

	movq	$VM_MAXUSER_ADDRESS-1,%rax	/* 1-byte access must fit below limit */
	cmpq	%rax,%rdi		/* verify address validity */
	ja	fusufault

	movl	%esi,%eax
	movb	%al,(%rdi)
	xorl	%eax,%eax		/* success: return 0 */
	movq	PCPU(CURPCB),%rcx	/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)	/* disarm the fault handler */
	ret
END(subyte)
534
/*
 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *           %rdi, %rsi, %rdx, %rcx
 *
 *	copy a string from from to to, stop when a 0 character is reached.
 *	return ENAMETOOLONG if string is longer than maxlen, and
 *	EFAULT on protection violations. If lencopied is non-zero,
 *	return the actual length in *lencopied.
 */
ENTRY(copyinstr)
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	%rcx,%r9			/* %r9 = *len */
	xchgq	%rdi,%rsi			/* %rsi = from (user), %rdi = to (kernel) */
	movq	PCPU(CURPCB),%rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)	/* arm the fault handler */

	movq	$VM_MAXUSER_ADDRESS,%rax

	/* make sure 'from' is within bounds */
	subq	%rsi,%rax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
	cmpq	%rdx,%rax
	jae	1f
	movq	%rax,%rdx
	movq	%rax,%r8
1:
	incq	%rdx			/* pre-increment: loop decrements first */
	cld

2:
	decq	%rdx
	jz	3f			/* count exhausted before NUL */

	lodsb				/* copy one byte: user -> kernel */
	stosb
	orb	%al,%al
	jnz	2b			/* keep going until the NUL terminator */

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax		/* return 0 */
	jmp	cpystrflt_x
3:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAXUSER_ADDRESS,%rax
	cmpq	%rax,%rsi		/* did we stop at the end of user space? */
	jae	cpystrflt
4:
	movq	$ENAMETOOLONG,%rax
	jmp	cpystrflt_x

cpystrflt:
	movq	$EFAULT,%rax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movq	PCPU(CURPCB),%rcx
	movq	$0,PCB_ONFAULT(%rcx)	/* disarm the fault handler */

	testq	%r9,%r9			/* caller wants the copied length? */
	jz	1f
	subq	%rdx,%r8		/* bytes copied = maxlen - remaining */
	movq	%r8,(%r9)
1:
	ret
END(copyinstr)
603
/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 *         %rdi, %rsi, %rdx, %rcx
 * Kernel-to-kernel string copy: same contract as copyinstr but without
 * user-address checks or fault handling.
 */
ENTRY(copystr)
	movq	%rdx,%r8			/* %r8 = maxlen */

	xchgq	%rdi,%rsi		/* %rsi = from, %rdi = to (movs convention) */
	incq	%rdx			/* pre-increment: loop decrements first */
	cld
1:
	decq	%rdx
	jz	4f			/* count exhausted before NUL */
	lodsb				/* copy one byte */
	stosb
	orb	%al,%al
	jnz	1b			/* keep going until the NUL terminator */

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax		/* return 0 */
	jmp	6f
4:
	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

6:

	testq	%rcx,%rcx		/* caller wants the copied length? */
	jz	7f
	/* set *lencopied and return %rax */
	subq	%rdx,%r8		/* bytes copied = maxlen - remaining */
	movq	%r8,(%rcx)
7:
	ret
END(copystr)
640
/*
 * Handling of special amd64 registers and descriptor tables etc
 * %rdi
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	lgdt	(%rdi)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload the data segment registers with the kernel selector */
	movl	$KDSEL,%eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%gs
	movl	%eax,%ss

	/* reload code selector by turning return into intersegmental return */
	popq	%rax
	pushq	$KCSEL
	pushq	%rax
	MEXITCOUNT
	lretq			/* far return reloads %cs with KCSEL */
END(lgdt)
668
/*****************************************************************************/
/* setjump, longjump                                                         */
/*****************************************************************************/

/*
 * int setjmp(jmp_buf = %rdi)
 * Save the callee-saved registers and the return address into the
 * 8-quadword buffer at %rdi; return 0.  (Kernel variant: no signal
 * mask or FPU state is saved.)
 */
ENTRY(setjmp)
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get rta */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
	ret
END(setjmp)
686
/*
 * int longjmp(jmp_buf = %rdi)
 * Restore the register state saved by setjmp and resume at the saved
 * return address; the matching setjmp call appears to return 1.
 */
ENTRY(longjmp)
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get rta */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
END(longjmp)
701
/*
 * Support for BB-profiling (gcc -a).  The kernbb program will extract
 * the data from the kernel.
 */

	.data
	ALIGN_DATA
	.globl bbhead
bbhead:
	.quad 0				/* head of the singly linked bb object list */

	.text
/*
 * Called once per profiled object: mark it initialized and push it
 * onto the bbhead list.  %rdi points at the bb object.
 */
NON_GPROF_ENTRY(__bb_init_func)
	movq	$1,(%rdi)		/* mark object as initialized */
	movq	bbhead,%rax
	movq	%rax,32(%rdi)		/* obj->next = bbhead */
	movq	%rdi,bbhead		/* bbhead = obj */
	NON_GPROF_RET
720
/*
 * Support for reading MSRs in the safe manner.
 * Faults (e.g. a #GP from a non-existent MSR) vector to msr_onfault,
 * which returns EFAULT; on success returns 0 with *data filled in.
 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movq	PCPU(CURPCB),%r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)	/* arm the fault handler */
	movl	%edi,%ecx
	rdmsr			/* Read MSR pointed by %ecx.  Returns
				   high 32 bits in %edx, low in %eax */
	salq	$32,%rdx	/* move %edx into the high half of %rdx */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */
	orq	%rdx,%rax	/* reassemble the 64-bit MSR value */
	movq	%rax,(%rsi)	/* *data = value */
	xorq	%rax,%rax	/* success: return 0 */
	movq	%rax,PCB_ONFAULT(%r8)	/* disarm the fault handler */
	ret
738
/*
 * Support for writing MSRs in the safe manner.
 * Faults vector to msr_onfault, which returns EFAULT; returns 0 on success.
 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
	movq	PCPU(CURPCB),%r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)	/* arm the fault handler */
	movl	%edi,%ecx
	movl	%esi,%eax	/* low 32 bits of data */
	sarq	$32,%rsi
	movl	%esi,%edx	/* high 32 bits of data */
	wrmsr			/* Write MSR pointed by %ecx.  Accepts
				   high 32 bits in %edx, low in %eax. */
	xorq	%rax,%rax	/* success: return 0 */
	movq	%rax,PCB_ONFAULT(%r8)	/* disarm the fault handler */
	ret
755
/*
 * MSR operations fault handler: disarm PCB_ONFAULT (still in %r8 from
 * rdmsr_safe/wrmsr_safe) and return EFAULT.
 */
	ALIGN_TEXT
msr_onfault:
	movq	$0,PCB_ONFAULT(%r8)	/* disarm the fault handler */
	movl	$EFAULT,%eax
	ret