/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.1/sys/kern/kern_timeout.c 158179 2006-04-30 16:44:43Z cvs2svn $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mtxcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mtxcalls, CTLFLAG_RD, &avg_mtxcalls, 0,
    "Average number of mtx callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c. */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;

static struct callout *nextsoftcheck;	/* Next callout to be checked. */
/**
 * Locked by callout_lock:
 *   curr_callout    - If a callout is in progress, it is curr_callout.
 *                     If curr_callout is non-NULL, threads waiting on
 *                     callout_wait will be woken up as soon as the
 *                     relevant callout completes.
 *   curr_cancelled  - Changing to 1 with both callout_lock and c_mtx held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_mtx, and it calls
 *                     the handler only if curr_cancelled is still 0 when
 *                     c_mtx is successfully acquired.
 *   wakeup_ctr      - Incremented every time a thread wants to wait
 *                     for a callout to complete.  Modified only when
 *                     curr_callout is non-NULL.
 *   wakeup_needed   - If a thread is waiting on callout_wait, then
 *                     wakeup_needed is nonzero.  Set only when
 *                     curr_callout is non-NULL.
 */
static struct callout *curr_callout;
static int curr_cancelled;
static int wakeup_ctr;
static int wakeup_needed;

/**
 * Locked by callout_wait_lock:
 *   callout_wait    - If wakeup_needed is set, callout_wait will be
 *                     triggered after the current callout finishes.
 *   wakeup_done_ctr - Set to the current value of wakeup_ctr after
 *                     callout_wait is triggered.
 */
static struct mtx callout_wait_lock;
static struct cv callout_wait;
static int wakeup_done_ctr;
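
/*
 * Illustrative sketch (not part of this file): how a hypothetical driver
 * would use this machinery.  The "mydev" names below are invented for the
 * example; callout_drain() and callout_stop() are the <sys/callout.h>
 * wrappers around _callout_stop_safe(), defined later in this file.
 */
#if 0
static struct mtx mydev_mtx;
static struct callout mydev_timer;

static void
mydev_tick(void *arg)
{
	/* mydev_mtx is held on entry; softclock() drops it on return. */
	callout_reset(&mydev_timer, hz, mydev_tick, NULL);	/* rearm */
}

static void
mydev_attach(void)
{
	mtx_init(&mydev_mtx, "mydev", NULL, MTX_DEF);
	callout_init_mtx(&mydev_timer, &mydev_mtx, 0);
	callout_reset(&mydev_timer, hz, mydev_tick, NULL);
}

static void
mydev_detach(void)
{
	/*
	 * callout_drain() sleeps on callout_wait (via the wakeup_ctr /
	 * wakeup_done_ctr handshake above) until a handler that has
	 * already started finishes; callout_stop() only cancels a
	 * still-pending callout.
	 */
	callout_drain(&mydev_timer);
	mtx_destroy(&mydev_mtx);
}
#endif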

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 * This code is called very early in the kernel initialization sequence,
 * and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	callout = (struct callout *)v;
	v = (caddr_t)(callout + ncallout);
	callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(callwheel + callwheelsize);
	return(v);
}
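
/*
 * Illustrative sketch of the caller's contract (an assumption about the
 * startup code, not code from this file): the base address is threaded
 * through the routine and the advanced pointer sizes the region, which is
 * why the function above may run more than once.
 */
#if 0
	caddr_t v;

	v = firstaddr;				/* hypothetical base address */
	v = kern_timeout_callwheel_alloc(v);	/* lays out callout[], callwheel[] */
	/* ... reserve the memory spanning [firstaddr, v) ... */
	kern_timeout_callwheel_init();		/* then initialize it, once */
#endif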

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 * This code is called just once, after the space reserved for the
 * callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&callout_wait_lock, "callout_wait_lock", NULL, MTX_DEF);
	cv_init(&callout_wait, "callout_wait");
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
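
/*
 * Illustrative sketch of the hashed-wheel idea (callwheel_bucket() is a
 * hypothetical helper, not used by this file): since callwheelsize is a
 * power of two, the hash is a simple mask, and one bucket holds every
 * callout whose expiry tick is congruent modulo callwheelsize.  That is
 * why softclock() below must skip entries whose c_time differs from the
 * tick currently being processed.
 */
#if 0
static struct callout_tailq *
callwheel_bucket(int expiry)
{
	return (&callwheel[expiry & callwheelmask]);
}
#endif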

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int mtxcalls;
	int gcalls;
	int wakeup_cookie;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	mtxcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct mtx *c_mtx;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_mtx = c->c_mtx;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_func = NULL;
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
					    c_links.sle);
					curr_callout = NULL;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
					curr_callout = c;
				}
				curr_cancelled = 0;
				mtx_unlock_spin(&callout_lock);
				if (c_mtx != NULL) {
					mtx_lock(c_mtx);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (curr_cancelled) {
						mtx_unlock(c_mtx);
						mtx_lock_spin(&callout_lock);
						goto done_locked;
					}
					/* The callout cannot be stopped now. */
					curr_cancelled = 1;

					if (c_mtx == &Giant) {
						gcalls++;
						CTR1(KTR_CALLOUT, "callout %p",
						    c_func);
					} else {
						mtxcalls++;
						CTR1(KTR_CALLOUT,
						    "callout mtx %p",
						    c_func);
					}
				} else {
					mpcalls++;
					CTR1(KTR_CALLOUT, "callout mpsafe %p",
					    c_func);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				c_func(c_arg);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					mtx_unlock(c_mtx);
				mtx_lock_spin(&callout_lock);
done_locked:
				curr_callout = NULL;
				if (wakeup_needed) {
					/*
					 * There might be someone waiting
					 * for the callout to complete.
					 */
					wakeup_cookie = wakeup_ctr;
					mtx_unlock_spin(&callout_lock);
					mtx_lock(&callout_wait_lock);
					cv_broadcast(&callout_wait);
					wakeup_done_ctr = wakeup_cookie;
					mtx_unlock(&callout_wait_lock);
					mtx_lock_spin(&callout_lock);
					wakeup_needed = 0;
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}
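
/*
 * Note on the statistics above (explanatory only; restates the code): each
 * avg_* line folds the latest sample into an exponentially decaying average
 * with weight 1/256, scaled by 1000 so an int carries three fractional
 * digits:
 *
 *	avg' = avg + (sample * 1000 - avg) / 256
 *
 * A steady depth of 5, for example, converges on avg_depth == 5000, which
 * the debug.to_avg_depth sysctl reports in units of 1/1000.
 */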

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
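
/*
 * Illustrative sketch of the legacy interface above (the mydev_* names
 * are hypothetical): the handle returned by timeout() plus the original
 * (function, argument) pair identify the entry for untimeout().
 */
#if 0
static struct callout_handle mydev_handle;

static void
mydev_expire(void *arg)
{
	/* Runs once, about five seconds after mydev_start(). */
}

static void
mydev_start(void)
{
	callout_handle_init(&mydev_handle);	/* untimeout() now benign */
	mydev_handle = timeout(mydev_expire, NULL, 5 * hz);
}

static void
mydev_cancel(void)
{
	untimeout(mydev_expire, NULL, mydev_handle);
}
#endif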

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
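
/*
 * Illustrative sketch of the classic handler idiom built on these macros
 * (hypothetical mydev_* names; a driver-supplied mutex taken inside the
 * handler is assumed): callout_pending() detects a reset that raced with
 * execution, callout_active() detects a callout_stop() that beat us to
 * the lock, and callout_deactivate() marks the callout as serviced.
 */
#if 0
struct mydev_softc {
	struct mtx	sc_mtx;
	struct callout	sc_timer;
};

static void
mydev_handler(void *arg)
{
	struct mydev_softc *sc = arg;

	mtx_lock(&sc->sc_mtx);
	if (callout_pending(&sc->sc_timer)) {
		/* We were rescheduled after being selected to run. */
		mtx_unlock(&sc->sc_mtx);
		return;
	}
	if (!callout_active(&sc->sc_timer)) {
		/* callout_stop() raced with execution; do nothing. */
		mtx_unlock(&sc->sc_mtx);
		return;
	}
	callout_deactivate(&sc->sc_timer);
	/* ... do the real work ... */
	mtx_unlock(&sc->sc_mtx);
}
#endif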
int
callout_reset(c, to_ticks, ftn, arg)
	struct callout *c;
	int to_ticks;
	void (*ftn)(void *);
	void *arg;
{
	int cancelled = 0;

#ifdef notyet /* Some callers of timeout() do not hold Giant. */
	if (c->c_mtx != NULL)
		mtx_assert(c->c_mtx, MA_OWNED);
#endif

	mtx_lock_spin(&callout_lock);
	if (c == curr_callout) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a mutex then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_mtx != NULL && !curr_cancelled)
			cancelled = curr_cancelled = 1;
		if (wakeup_needed) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			mtx_unlock_spin(&callout_lock);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (nextsoftcheck == c) {
			nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;

		/*
		 * Part of the normal "stop a pending callout" process
		 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
		 * flags.  We're not going to bother doing that here,
		 * because we're going to be setting those flags ten lines
		 * after this point, and we're holding callout_lock
		 * between now and then.
		 */
	}

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	mtx_unlock_spin(&callout_lock);

	return (cancelled);
}

int
_callout_stop_safe(c, safe)
	struct callout *c;
	int safe;
{
	int use_mtx, wakeup_cookie;

	if (!safe && c->c_mtx != NULL) {
#ifdef notyet /* Some callers do not hold Giant for Giant-locked callouts. */
		mtx_assert(c->c_mtx, MA_OWNED);
		use_mtx = 1;
#else
		use_mtx = mtx_owned(c->c_mtx);
#endif
	} else {
		use_mtx = 0;
	}

	mtx_lock_spin(&callout_lock);
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		if (c != curr_callout) {
			mtx_unlock_spin(&callout_lock);
			return (0);
		}
		if (safe) {
			/* We need to wait until the callout is finished. */
			wakeup_needed = 1;
			wakeup_cookie = wakeup_ctr++;
			mtx_unlock_spin(&callout_lock);
			mtx_lock(&callout_wait_lock);

			/*
			 * Check to make sure that softclock() didn't
			 * do the wakeup in between our dropping
			 * callout_lock and picking up callout_wait_lock.
			 * The signed subtraction keeps this test correct
			 * even once wakeup_ctr wraps around.
			 */
			if (wakeup_cookie - wakeup_done_ctr > 0)
				cv_wait(&callout_wait, &callout_wait_lock);

			mtx_unlock(&callout_wait_lock);
		} else if (use_mtx && !curr_cancelled) {
			/* We can stop the callout before it runs. */
			curr_cancelled = 1;
			mtx_unlock_spin(&callout_lock);
			return (1);
		} else
			mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}

void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_mtx = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_mtx = &Giant;
		c->c_flags = 0;
	}
}

void
callout_init_mtx(c, mtx, flags)
	struct callout *c;
	struct mtx *mtx;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_mtx = mtx;
	KASSERT((flags & ~CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_mtx: bad flags %d", flags));
	/* CALLOUT_RETURNUNLOCKED makes no sense without a mutex. */
	KASSERT(mtx != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_mtx: CALLOUT_RETURNUNLOCKED with no mutex"));
	c->c_flags = flags & CALLOUT_RETURNUNLOCKED;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
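
/*
 * Worked example of the delta_ticks computation below (assuming hz = 100,
 * i.e. tick = 10000 usec; the values are illustrative, not from this
 * file): a 2.5 second suspend gives
 *
 *	delta_ticks = (2 * 1000000 + 500000 + 9999) / 10000 + 1 = 251,
 *
 * so every timer within 251 ticks of expiry fires on the next softclock().
 */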
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
#endif /* APM_FIXUP_CALLTODO */