FreeBSD/Linux Kernel Cross Reference
sys/kernel/posix-timers.c


    1 /*
    2  * linux/kernel/posix-timers.c
    3  *
    4  *
    5  * 2002-10-15  Posix Clocks & timers
    6  *                           by George Anzinger george@mvista.com
    7  *
    8  *                           Copyright (C) 2002 2003 by MontaVista Software.
    9  *
   10  * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
   11  *                           Copyright (C) 2004 Boris Hu
   12  *
   13  * This program is free software; you can redistribute it and/or modify
   14  * it under the terms of the GNU General Public License as published by
   15  * the Free Software Foundation; either version 2 of the License, or (at
   16  * your option) any later version.
   17  *
   18  * This program is distributed in the hope that it will be useful, but
   19  * WITHOUT ANY WARRANTY; without even the implied warranty of
   20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   21  * General Public License for more details.
   22  *
   23  * You should have received a copy of the GNU General Public License
   24  * along with this program; if not, write to the Free Software
   25  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
   26  *
   27  * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
   28  */
   29 
   30 /* These are all the functions necessary to implement
   31  * POSIX clocks & timers
   32  */
   33 #include <linux/mm.h>
   34 #include <linux/interrupt.h>
   35 #include <linux/slab.h>
   36 #include <linux/time.h>
   37 #include <linux/mutex.h>
   38 
   39 #include <asm/uaccess.h>
   40 #include <linux/list.h>
   41 #include <linux/init.h>
   42 #include <linux/compiler.h>
   43 #include <linux/idr.h>
   44 #include <linux/posix-clock.h>
   45 #include <linux/posix-timers.h>
   46 #include <linux/syscalls.h>
   47 #include <linux/wait.h>
   48 #include <linux/workqueue.h>
   49 #include <linux/export.h>
   50 
   51 /*
    52  * Management arrays for POSIX timers.  Timers are kept in slab memory.
    53  * Timer ids are allocated by an external routine that keeps track of the
    54  * id and the timer.  The external interface is:
    55  *
    56  * void *idr_find(struct idr *idp, int id);           to find timer_id <id>
    57  * int idr_get_new(struct idr *idp, void *ptr);       to get a new id and
    58  *                                                    relate it to <ptr>
    59  * void idr_remove(struct idr *idp, int id);          to release <id>
    60  * void idr_init(struct idr *idp);                    to initialize <idp>
    61  *                                                    which we supply.
    62  * idr_get_new *may* call slab for more memory, so it must not be
    63  * called under a spin lock.  Likewise, idr_remove may release memory
    64  * (but it may be ok to do this under a lock...).
    65  * idr_find is just a memory lookup and is quite fast.  A NULL return
    66  * indicates that the requested id does not exist.
   67  */
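
A minimal sketch of the allocation pattern this interface implies, mirroring
what sys_timer_create() does further down in this file: idr_pre_get() may
sleep and allocate, so it runs outside the spinlock, and idr_get_new() is
retried when another CPU consumed the preallocated node.  The variables tmr,
id and error stand in for the caller's state:

        int id, error;

        do {
                if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL)))
                        return -EAGAIN;         /* preallocation failed */
                spin_lock_irq(&idr_lock);
                error = idr_get_new(&posix_timers_id, tmr, &id);
                spin_unlock_irq(&idr_lock);
        } while (error == -EAGAIN);             /* lost a race; refill */
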
   68 
   69 /*
    70  * Let's keep our timers in a slab cache :-)
   71  */
   72 static struct kmem_cache *posix_timers_cache;
   73 static struct idr posix_timers_id;
   74 static DEFINE_SPINLOCK(idr_lock);
   75 
   76 /*
    77  * We assume that the new SIGEV_THREAD_ID shares no bits with the other
    78  * SIGEV values.  Emit a compile-time error if this assumption fails.
   79  */
   80 #if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
   81                        ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
   82 #error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
   83 #endif
   84 
   85 /*
   86  * parisc wants ENOTSUP instead of EOPNOTSUPP
   87  */
   88 #ifndef ENOTSUP
   89 # define ENANOSLEEP_NOTSUP EOPNOTSUPP
   90 #else
   91 # define ENANOSLEEP_NOTSUP ENOTSUP
   92 #endif
   93 
   94 /*
   95  * The timer ID is turned into a timer address by idr_find().
   96  * Verifying a valid ID consists of:
   97  *
    98  * a) checking that idr_find() returns other than NULL.
    99  * b) checking that the timer id matches the one in the timer itself.
   100  * c) checking that the timer owner is in the caller's thread group.
  101  */
  102 
  103 /*
  104  * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
  105  *          to implement others.  This structure defines the various
  106  *          clocks.
  107  *
  108  * RESOLUTION: Clock resolution is used to round up timer and interval
  109  *          times, NOT to report clock times, which are reported with as
  110  *          much resolution as the system can muster.  In some cases this
  111  *          resolution may depend on the underlying clock hardware and
  112  *          may not be quantifiable until run time, and only then is the
   113  *          necessary code written.  The standard says we should say
  114  *          something about this issue in the documentation...
  115  *
  116  * FUNCTIONS: The CLOCKs structure defines possible functions to
  117  *          handle various clock functions.
  118  *
  119  *          The standard POSIX timer management code assumes the
  120  *          following: 1.) The k_itimer struct (sched.h) is used for
  121  *          the timer.  2.) The list, it_lock, it_clock, it_id and
  122  *          it_pid fields are not modified by timer code.
  123  *
  124  * Permissions: It is assumed that the clock_settime() function defined
  125  *          for each clock will take care of permission checks.  Some
   126  *          clocks may be settable by any user (i.e. local process
   127  *          clocks), others not.  Currently the only settable clock we
   128  *          have is CLOCK_REALTIME and its high-res counterpart, both of
   129  *          which we beg off on and pass to do_sys_settimeofday().
  130  */
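
The RESOLUTION note above is observable from user space: clock_getres()
reports the granularity used to round timer values, while clock_gettime()
reports the fullest reading the clock can deliver.  A small, self-contained
illustration (link with -lrt on older glibc):

        #include <stdio.h>
        #include <time.h>

        int main(void)
        {
                struct timespec res, now;

                clock_getres(CLOCK_MONOTONIC, &res);    /* timer rounding unit */
                clock_gettime(CLOCK_MONOTONIC, &now);   /* full-precision time */

                printf("resolution: %ld ns\n", res.tv_nsec);
                printf("now: %lld.%09ld\n", (long long)now.tv_sec, now.tv_nsec);
                return 0;
        }
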
  131 
  132 static struct k_clock posix_clocks[MAX_CLOCKS];
  133 
  134 /*
  135  * These ones are defined below.
  136  */
  137 static int common_nsleep(const clockid_t, int flags, struct timespec *t,
  138                          struct timespec __user *rmtp);
  139 static int common_timer_create(struct k_itimer *new_timer);
  140 static void common_timer_get(struct k_itimer *, struct itimerspec *);
  141 static int common_timer_set(struct k_itimer *, int,
  142                             struct itimerspec *, struct itimerspec *);
  143 static int common_timer_del(struct k_itimer *timer);
  144 
  145 static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);
  146 
  147 static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
  148 
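/*
 * lock_timer() wraps __lock_timer(); the __cond_lock() annotation exists
 * only so sparse knows it_lock is held when a timer is returned.
 */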
  149 #define lock_timer(tid, flags)                                             \
  150 ({      struct k_itimer *__timr;                                           \
  151         __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags));  \
  152         __timr;                                                            \
  153 })
  154 
  155 static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
  156 {
  157         spin_unlock_irqrestore(&timr->it_lock, flags);
  158 }
  159 
  160 /* Get clock_realtime */
  161 static int posix_clock_realtime_get(clockid_t which_clock, struct timespec *tp)
  162 {
  163         ktime_get_real_ts(tp);
  164         return 0;
  165 }
  166 
  167 /* Set clock_realtime */
  168 static int posix_clock_realtime_set(const clockid_t which_clock,
  169                                     const struct timespec *tp)
  170 {
  171         return do_sys_settimeofday(tp, NULL);
  172 }
  173 
  174 static int posix_clock_realtime_adj(const clockid_t which_clock,
  175                                     struct timex *t)
  176 {
  177         return do_adjtimex(t);
  178 }
  179 
  180 /*
  181  * Get monotonic time for posix timers
  182  */
  183 static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
  184 {
  185         ktime_get_ts(tp);
  186         return 0;
  187 }
  188 
  189 /*
  190  * Get monotonic-raw time for posix timers
  191  */
  192 static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
  193 {
  194         getrawmonotonic(tp);
  195         return 0;
  196 }
  197 
  198 
  199 static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp)
  200 {
  201         *tp = current_kernel_time();
  202         return 0;
  203 }
  204 
  205 static int posix_get_monotonic_coarse(clockid_t which_clock,
  206                                                 struct timespec *tp)
  207 {
  208         *tp = get_monotonic_coarse();
  209         return 0;
  210 }
  211 
  212 static int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
  213 {
  214         *tp = ktime_to_timespec(KTIME_LOW_RES);
  215         return 0;
  216 }
  217 
  218 static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
  219 {
  220         get_monotonic_boottime(tp);
  221         return 0;
  222 }
  223 
  224 
  225 /*
  226  * Initialize everything, well, just everything in Posix clocks/timers ;)
  227  */
  228 static __init int init_posix_timers(void)
  229 {
  230         struct k_clock clock_realtime = {
  231                 .clock_getres   = hrtimer_get_res,
  232                 .clock_get      = posix_clock_realtime_get,
  233                 .clock_set      = posix_clock_realtime_set,
  234                 .clock_adj      = posix_clock_realtime_adj,
  235                 .nsleep         = common_nsleep,
  236                 .nsleep_restart = hrtimer_nanosleep_restart,
  237                 .timer_create   = common_timer_create,
  238                 .timer_set      = common_timer_set,
  239                 .timer_get      = common_timer_get,
  240                 .timer_del      = common_timer_del,
  241         };
  242         struct k_clock clock_monotonic = {
  243                 .clock_getres   = hrtimer_get_res,
  244                 .clock_get      = posix_ktime_get_ts,
  245                 .nsleep         = common_nsleep,
  246                 .nsleep_restart = hrtimer_nanosleep_restart,
  247                 .timer_create   = common_timer_create,
  248                 .timer_set      = common_timer_set,
  249                 .timer_get      = common_timer_get,
  250                 .timer_del      = common_timer_del,
  251         };
  252         struct k_clock clock_monotonic_raw = {
  253                 .clock_getres   = hrtimer_get_res,
  254                 .clock_get      = posix_get_monotonic_raw,
  255         };
  256         struct k_clock clock_realtime_coarse = {
  257                 .clock_getres   = posix_get_coarse_res,
  258                 .clock_get      = posix_get_realtime_coarse,
  259         };
  260         struct k_clock clock_monotonic_coarse = {
  261                 .clock_getres   = posix_get_coarse_res,
  262                 .clock_get      = posix_get_monotonic_coarse,
  263         };
  264         struct k_clock clock_boottime = {
  265                 .clock_getres   = hrtimer_get_res,
  266                 .clock_get      = posix_get_boottime,
  267                 .nsleep         = common_nsleep,
  268                 .nsleep_restart = hrtimer_nanosleep_restart,
  269                 .timer_create   = common_timer_create,
  270                 .timer_set      = common_timer_set,
  271                 .timer_get      = common_timer_get,
  272                 .timer_del      = common_timer_del,
  273         };
  274 
  275         posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
  276         posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
  277         posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
  278         posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
  279         posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
  280         posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);
  281 
  282         posix_timers_cache = kmem_cache_create("posix_timers_cache",
  283                                         sizeof (struct k_itimer), 0, SLAB_PANIC,
  284                                         NULL);
  285         idr_init(&posix_timers_id);
  286         return 0;
  287 }
  288 
  289 __initcall(init_posix_timers);
  290 
  291 static void schedule_next_timer(struct k_itimer *timr)
  292 {
  293         struct hrtimer *timer = &timr->it.real.timer;
  294 
  295         if (timr->it.real.interval.tv64 == 0)
  296                 return;
  297 
  298         timr->it_overrun += (unsigned int) hrtimer_forward(timer,
  299                                                 timer->base->get_time(),
  300                                                 timr->it.real.interval);
  301 
  302         timr->it_overrun_last = timr->it_overrun;
  303         timr->it_overrun = -1;
  304         ++timr->it_requeue_pending;
  305         hrtimer_restart(timer);
  306 }
  307 
  308 /*
   309  * This function is exported for use by the signal delivery code.  It is
   310  * called just prior to the info block being released and passes that
   311  * block to us.  Its function is to update the overrun entry AND to
  312  * restart the timer.  It should only be called if the timer is to be
  313  * restarted (i.e. we have flagged this in the sys_private entry of the
  314  * info block).
  315  *
  316  * To protect against the timer going away while the interrupt is queued,
  317  * we require that the it_requeue_pending flag be set.
  318  */
  319 void do_schedule_next_timer(struct siginfo *info)
  320 {
  321         struct k_itimer *timr;
  322         unsigned long flags;
  323 
  324         timr = lock_timer(info->si_tid, &flags);
  325 
  326         if (timr && timr->it_requeue_pending == info->si_sys_private) {
  327                 if (timr->it_clock < 0)
  328                         posix_cpu_timer_schedule(timr);
  329                 else
  330                         schedule_next_timer(timr);
  331 
  332                 info->si_overrun += timr->it_overrun_last;
  333         }
  334 
  335         if (timr)
  336                 unlock_timer(timr, flags);
  337 }
  338 
  339 int posix_timer_event(struct k_itimer *timr, int si_private)
  340 {
  341         struct task_struct *task;
  342         int shared, ret = -1;
  343         /*
  344          * FIXME: if ->sigq is queued we can race with
  345          * dequeue_signal()->do_schedule_next_timer().
  346          *
  347          * If dequeue_signal() sees the "right" value of
  348          * si_sys_private it calls do_schedule_next_timer().
   349  * We re-queue ->sigq and drop ->it_lock.
   350  * do_schedule_next_timer() locks the timer
   351  * and re-schedules it while ->sigq is pending.
   352  * Not really bad, but not what we want.
  353          */
  354         timr->sigq->info.si_sys_private = si_private;
  355 
  356         rcu_read_lock();
  357         task = pid_task(timr->it_pid, PIDTYPE_PID);
  358         if (task) {
  359                 shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
  360                 ret = send_sigqueue(timr->sigq, task, shared);
  361         }
  362         rcu_read_unlock();
  363         /* If we failed to send the signal the timer stops. */
  364         return ret > 0;
  365 }
  366 EXPORT_SYMBOL_GPL(posix_timer_event);
  367 
  368 /*
  369  * This function gets called when a POSIX.1b interval timer expires.  It
  370  * is used as a callback from the kernel internal timer.  The
  371  * run_timer_list code ALWAYS calls with interrupts on.
   372  *
  373  * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
  374  */
  375 static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
  376 {
  377         struct k_itimer *timr;
  378         unsigned long flags;
  379         int si_private = 0;
  380         enum hrtimer_restart ret = HRTIMER_NORESTART;
  381 
  382         timr = container_of(timer, struct k_itimer, it.real.timer);
  383         spin_lock_irqsave(&timr->it_lock, flags);
  384 
  385         if (timr->it.real.interval.tv64 != 0)
  386                 si_private = ++timr->it_requeue_pending;
  387 
  388         if (posix_timer_event(timr, si_private)) {
   389                 /*
   390                  * The signal was not sent because it is ignored
   391                  * (SIG_IGN); we will not get a callback to restart
   392                  * it AND it should be restarted.
   393                  */
  394                 if (timr->it.real.interval.tv64 != 0) {
  395                         ktime_t now = hrtimer_cb_get_time(timer);
  396 
  397                         /*
  398                          * FIXME: What we really want, is to stop this
  399                          * timer completely and restart it in case the
  400                          * SIG_IGN is removed. This is a non trivial
  401                          * change which involves sighand locking
  402                          * (sigh !), which we don't want to do late in
  403                          * the release cycle.
  404                          *
  405                          * For now we just let timers with an interval
   406  *                         less than a jiffy expire every jiffy to
  407                          * avoid softirq starvation in case of SIG_IGN
  408                          * and a very small interval, which would put
  409                          * the timer right back on the softirq pending
  410                          * list. By moving now ahead of time we trick
  411                          * hrtimer_forward() to expire the timer
  412                          * later, while we still maintain the overrun
  413                          * accuracy, but have some inconsistency in
  414                          * the timer_gettime() case. This is at least
  415                          * better than a starved softirq. A more
  416                          * complex fix which solves also another related
  417                          * inconsistency is already in the pipeline.
  418                          */
  419 #ifdef CONFIG_HIGH_RES_TIMERS
  420                         {
  421                                 ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);
  422 
  423                                 if (timr->it.real.interval.tv64 < kj.tv64)
  424                                         now = ktime_add(now, kj);
  425                         }
  426 #endif
  427                         timr->it_overrun += (unsigned int)
  428                                 hrtimer_forward(timer, now,
  429                                                 timr->it.real.interval);
  430                         ret = HRTIMER_RESTART;
  431                         ++timr->it_requeue_pending;
  432                 }
  433         }
  434 
  435         unlock_timer(timr, flags);
  436         return ret;
  437 }
  438 
  439 static struct pid *good_sigevent(sigevent_t * event)
  440 {
  441         struct task_struct *rtn = current->group_leader;
  442 
  443         if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
  444                 (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
  445                  !same_thread_group(rtn, current) ||
  446                  (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
  447                 return NULL;
  448 
  449         if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
  450             ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
  451                 return NULL;
  452 
  453         return task_pid(rtn);
  454 }
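
For reference, a user-space sigevent that passes the checks above.  This is
a sketch assuming the libc exposes the sigev_notify_thread_id field (older
libcs spell it _sigev_un._tid); the tid must be in the caller's thread group:

        #define _GNU_SOURCE
        #include <signal.h>
        #include <string.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        static void make_thread_sigevent(struct sigevent *sev)
        {
                memset(sev, 0, sizeof(*sev));
                /* SIGEV_THREAD_ID may only be combined with SIGEV_SIGNAL */
                sev->sigev_notify = SIGEV_THREAD_ID | SIGEV_SIGNAL;
                sev->sigev_signo  = SIGRTMIN;
                /* target thread; must be in our own thread group */
                sev->sigev_notify_thread_id = syscall(SYS_gettid);
        }
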
  455 
  456 void posix_timers_register_clock(const clockid_t clock_id,
  457                                  struct k_clock *new_clock)
  458 {
  459         if ((unsigned) clock_id >= MAX_CLOCKS) {
  460                 printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
  461                        clock_id);
  462                 return;
  463         }
  464 
  465         if (!new_clock->clock_get) {
  466                 printk(KERN_WARNING "POSIX clock id %d lacks clock_get()\n",
  467                        clock_id);
  468                 return;
  469         }
  470         if (!new_clock->clock_getres) {
  471                 printk(KERN_WARNING "POSIX clock id %d lacks clock_getres()\n",
  472                        clock_id);
  473                 return;
  474         }
  475 
  476         posix_clocks[clock_id] = *new_clock;
  477 }
  478 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
  479 
  480 static struct k_itimer * alloc_posix_timer(void)
  481 {
  482         struct k_itimer *tmr;
  483         tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
  484         if (!tmr)
  485                 return tmr;
  486         if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
  487                 kmem_cache_free(posix_timers_cache, tmr);
  488                 return NULL;
  489         }
  490         memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
  491         return tmr;
  492 }
  493 
  494 static void k_itimer_rcu_free(struct rcu_head *head)
  495 {
  496         struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);
  497 
  498         kmem_cache_free(posix_timers_cache, tmr);
  499 }
  500 
  501 #define IT_ID_SET       1
  502 #define IT_ID_NOT_SET   0
  503 static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
  504 {
  505         if (it_id_set) {
  506                 unsigned long flags;
  507                 spin_lock_irqsave(&idr_lock, flags);
  508                 idr_remove(&posix_timers_id, tmr->it_id);
  509                 spin_unlock_irqrestore(&idr_lock, flags);
  510         }
  511         put_pid(tmr->it_pid);
  512         sigqueue_free(tmr->sigq);
  513         call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
  514 }
  515 
  516 static struct k_clock *clockid_to_kclock(const clockid_t id)
  517 {
  518         if (id < 0)
  519                 return (id & CLOCKFD_MASK) == CLOCKFD ?
  520                         &clock_posix_dynamic : &clock_posix_cpu;
  521 
  522         if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
  523                 return NULL;
  524         return &posix_clocks[id];
  525 }
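
Negative clock ids never hit posix_clocks[]: CPU clocks and dynamic
(character-device) clocks are dispatched to their own k_clock tables.  User
space reaches the dynamic path by folding a file descriptor into a clockid.
A sketch, assuming a PTP device node at /dev/ptp0 and the conventional
FD_TO_CLOCKID() encoding from the kernel headers:

        #include <fcntl.h>
        #include <stdio.h>
        #include <time.h>
        #include <unistd.h>

        #define CLOCKFD 3
        #define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

        int main(void)
        {
                struct timespec ts;
                int fd = open("/dev/ptp0", O_RDONLY);   /* assumed device */

                if (fd < 0)
                        return 1;
                if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
                        printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
                close(fd);
                return 0;
        }
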
  526 
  527 static int common_timer_create(struct k_itimer *new_timer)
  528 {
  529         hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
  530         return 0;
  531 }
  532 
  533 /* Create a POSIX.1b interval timer. */
  534 
  535 SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
  536                 struct sigevent __user *, timer_event_spec,
  537                 timer_t __user *, created_timer_id)
  538 {
  539         struct k_clock *kc = clockid_to_kclock(which_clock);
  540         struct k_itimer *new_timer;
  541         int error, new_timer_id;
  542         sigevent_t event;
  543         int it_id_set = IT_ID_NOT_SET;
  544 
  545         if (!kc)
  546                 return -EINVAL;
  547         if (!kc->timer_create)
  548                 return -EOPNOTSUPP;
  549 
  550         new_timer = alloc_posix_timer();
  551         if (unlikely(!new_timer))
  552                 return -EAGAIN;
  553 
  554         spin_lock_init(&new_timer->it_lock);
  555  retry:
  556         if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
  557                 error = -EAGAIN;
  558                 goto out;
  559         }
  560         spin_lock_irq(&idr_lock);
  561         error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
  562         spin_unlock_irq(&idr_lock);
  563         if (error) {
  564                 if (error == -EAGAIN)
  565                         goto retry;
  566                 /*
  567                  * Weird looking, but we return EAGAIN if the IDR is
  568                  * full (proper POSIX return value for this)
  569                  */
  570                 error = -EAGAIN;
  571                 goto out;
  572         }
  573 
  574         it_id_set = IT_ID_SET;
  575         new_timer->it_id = (timer_t) new_timer_id;
  576         new_timer->it_clock = which_clock;
  577         new_timer->it_overrun = -1;
  578 
  579         if (timer_event_spec) {
  580                 if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
  581                         error = -EFAULT;
  582                         goto out;
  583                 }
  584                 rcu_read_lock();
  585                 new_timer->it_pid = get_pid(good_sigevent(&event));
  586                 rcu_read_unlock();
  587                 if (!new_timer->it_pid) {
  588                         error = -EINVAL;
  589                         goto out;
  590                 }
  591         } else {
  592                 event.sigev_notify = SIGEV_SIGNAL;
  593                 event.sigev_signo = SIGALRM;
  594                 event.sigev_value.sival_int = new_timer->it_id;
  595                 new_timer->it_pid = get_pid(task_tgid(current));
  596         }
  597 
  598         new_timer->it_sigev_notify     = event.sigev_notify;
  599         new_timer->sigq->info.si_signo = event.sigev_signo;
  600         new_timer->sigq->info.si_value = event.sigev_value;
  601         new_timer->sigq->info.si_tid   = new_timer->it_id;
  602         new_timer->sigq->info.si_code  = SI_TIMER;
  603 
  604         if (copy_to_user(created_timer_id,
  605                          &new_timer_id, sizeof (new_timer_id))) {
  606                 error = -EFAULT;
  607                 goto out;
  608         }
  609 
  610         error = kc->timer_create(new_timer);
  611         if (error)
  612                 goto out;
  613 
  614         spin_lock_irq(&current->sighand->siglock);
  615         new_timer->it_signal = current->signal;
  616         list_add(&new_timer->list, &current->signal->posix_timers);
  617         spin_unlock_irq(&current->sighand->siglock);
  618 
  619         return 0;
  620         /*
  621          * In the case of the timer belonging to another task, after
  622          * the task is unlocked, the timer is owned by the other task
  623          * and may cease to exist at any time.  Don't use or modify
  624          * new_timer after the unlock call.
  625          */
  626 out:
  627         release_posix_timer(new_timer, it_id_set);
  628         return error;
  629 }
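
From user space, the creation path above reduces to a few calls.  A minimal,
runnable example (link with -lrt on older glibc) that arms a periodic
CLOCK_MONOTONIC timer delivering SIGALRM, the default notification set up
when timer_event_spec is NULL:

        #include <signal.h>
        #include <time.h>
        #include <unistd.h>

        static volatile sig_atomic_t ticks;

        static void on_alarm(int sig) { (void)sig; ticks++; }

        int main(void)
        {
                timer_t tid;
                struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
                                        .sigev_signo  = SIGALRM };
                struct itimerspec its = { .it_value    = { 1, 0 },  /* first expiry: 1s */
                                          .it_interval = { 1, 0 } };/* period: 1s */

                signal(SIGALRM, on_alarm);
                if (timer_create(CLOCK_MONOTONIC, &sev, &tid) != 0)
                        return 1;
                timer_settime(tid, 0, &its, NULL);

                while (ticks < 3)
                        pause();        /* each SIGALRM bumps ticks */

                timer_delete(tid);
                return 0;
        }
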
  630 
   631 /*
   632  * Locking issues: We need to protect the result of the id lookup until
   633  * we get the timer locked down so it is not deleted under us.  Removal
   634  * is done under the idr spinlock and the timer is freed via RCU, so an
   635  * rcu_read_lock() here bridges the find to the timer lock.  To avoid a
   636  * deadlock, the timer id MUST be released without holding the timer lock.
   637  */
  638 static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
  639 {
  640         struct k_itimer *timr;
  641 
  642         rcu_read_lock();
  643         timr = idr_find(&posix_timers_id, (int)timer_id);
  644         if (timr) {
  645                 spin_lock_irqsave(&timr->it_lock, *flags);
  646                 if (timr->it_signal == current->signal) {
  647                         rcu_read_unlock();
  648                         return timr;
  649                 }
  650                 spin_unlock_irqrestore(&timr->it_lock, *flags);
  651         }
  652         rcu_read_unlock();
  653 
  654         return NULL;
  655 }
  656 
  657 /*
  658  * Get the time remaining on a POSIX.1b interval timer.  This function
  659  * is ALWAYS called with spin_lock_irq on the timer, thus it must not
  660  * mess with irq.
  661  *
  662  * We have a couple of messes to clean up here.  First there is the case
  663  * of a timer that has a requeue pending.  These timers should appear to
  664  * be in the timer list with an expiry as if we were to requeue them
  665  * now.
  666  *
   667  * The second issue is the SIGEV_NONE timer which may be active but is
   668  * never actually put in the timer list (to save system resources).
   669  * Such a timer may have expired, and if so, we expire it here.  Otherwise
   670  * it is the same as a requeue-pending timer with respect to what we
   671  * should report.
  672  */
  673 static void
  674 common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
  675 {
  676         ktime_t now, remaining, iv;
  677         struct hrtimer *timer = &timr->it.real.timer;
  678 
  679         memset(cur_setting, 0, sizeof(struct itimerspec));
  680 
  681         iv = timr->it.real.interval;
  682 
  683         /* interval timer ? */
  684         if (iv.tv64)
  685                 cur_setting->it_interval = ktime_to_timespec(iv);
  686         else if (!hrtimer_active(timer) &&
  687                  (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
  688                 return;
  689 
  690         now = timer->base->get_time();
  691 
  692         /*
  693          * When a requeue is pending or this is a SIGEV_NONE
  694          * timer move the expiry time forward by intervals, so
  695          * expiry is > now.
  696          */
  697         if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
  698             (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
  699                 timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
  700 
  701         remaining = ktime_sub(hrtimer_get_expires(timer), now);
   702         /* Return 0 only when the timer is expired and not pending */
  703         if (remaining.tv64 <= 0) {
  704                 /*
   705                  * A single-shot SIGEV_NONE timer must return 0 when
   706                  * it is expired!
  707                  */
  708                 if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
  709                         cur_setting->it_value.tv_nsec = 1;
  710         } else
  711                 cur_setting->it_value = ktime_to_timespec(remaining);
  712 }
  713 
  714 /* Get the time remaining on a POSIX.1b interval timer. */
  715 SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
  716                 struct itimerspec __user *, setting)
  717 {
  718         struct itimerspec cur_setting;
  719         struct k_itimer *timr;
  720         struct k_clock *kc;
  721         unsigned long flags;
  722         int ret = 0;
  723 
  724         timr = lock_timer(timer_id, &flags);
  725         if (!timr)
  726                 return -EINVAL;
  727 
  728         kc = clockid_to_kclock(timr->it_clock);
  729         if (WARN_ON_ONCE(!kc || !kc->timer_get))
  730                 ret = -EINVAL;
  731         else
  732                 kc->timer_get(timr, &cur_setting);
  733 
  734         unlock_timer(timr, flags);
  735 
  736         if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
  737                 return -EFAULT;
  738 
  739         return ret;
  740 }
  741 
   742 /*
   743  * Get the number of overruns of a POSIX.1b interval timer.  This is the
   744  * overrun count of the timer signal last delivered.  At the same time we
   745  * are accumulating overruns on the next timer.  The overrun is frozen when
   746  * the signal is delivered, either at the notify time (if the info block
   747  * is not queued) or at the actual delivery time (as we are informed by
   748  * the callback to do_schedule_next_timer()).  So all we need to do is
   749  * pick up the frozen overrun.
   750  */
  751 SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
  752 {
  753         struct k_itimer *timr;
  754         int overrun;
  755         unsigned long flags;
  756 
  757         timr = lock_timer(timer_id, &flags);
  758         if (!timr)
  759                 return -EINVAL;
  760 
  761         overrun = timr->it_overrun_last;
  762         unlock_timer(timr, flags);
  763 
  764         return overrun;
  765 }
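
A sketch of how a consumer reads the frozen overrun from its signal handling
path; tid is the timer_t created earlier (hypothetical caller state):

        #include <time.h>

        /* Count how many expiries the signal just delivered represents:
         * the delivery itself plus any overruns merged into it. */
        static long expiries_for_delivery(timer_t tid)
        {
                long n = 1;
                int overrun = timer_getoverrun(tid);

                if (overrun > 0)
                        n += overrun;
                return n;
        }
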
  766 
  767 /* Set a POSIX.1b interval timer. */
  768 /* timr->it_lock is taken. */
  769 static int
  770 common_timer_set(struct k_itimer *timr, int flags,
  771                  struct itimerspec *new_setting, struct itimerspec *old_setting)
  772 {
  773         struct hrtimer *timer = &timr->it.real.timer;
  774         enum hrtimer_mode mode;
  775 
  776         if (old_setting)
  777                 common_timer_get(timr, old_setting);
  778 
  779         /* disable the timer */
  780         timr->it.real.interval.tv64 = 0;
  781         /*
   782          * Careful here.  On SMP we could be in the "fire" routine, which
   783          * will be spinning as we hold the lock.  But this is ONLY an SMP issue.
  784          */
  785         if (hrtimer_try_to_cancel(timer) < 0)
  786                 return TIMER_RETRY;
  787 
  788         timr->it_requeue_pending = (timr->it_requeue_pending + 2) & 
  789                 ~REQUEUE_PENDING;
  790         timr->it_overrun_last = 0;
  791 
  792         /* switch off the timer when it_value is zero */
  793         if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
  794                 return 0;
  795 
  796         mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
  797         hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
  798         timr->it.real.timer.function = posix_timer_fn;
  799 
  800         hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));
  801 
  802         /* Convert interval */
  803         timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
  804 
   805         /* SIGEV_NONE timers are not queued!  See common_timer_get. */
   806         if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
  807                 /* Setup correct expiry time for relative timers */
  808                 if (mode == HRTIMER_MODE_REL) {
  809                         hrtimer_add_expires(timer, timer->base->get_time());
  810                 }
  811                 return 0;
  812         }
  813 
  814         hrtimer_start_expires(timer, mode);
  815         return 0;
  816 }
  817 
  818 /* Set a POSIX.1b interval timer */
  819 SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
  820                 const struct itimerspec __user *, new_setting,
  821                 struct itimerspec __user *, old_setting)
  822 {
  823         struct k_itimer *timr;
  824         struct itimerspec new_spec, old_spec;
  825         int error = 0;
  826         unsigned long flag;
  827         struct itimerspec *rtn = old_setting ? &old_spec : NULL;
  828         struct k_clock *kc;
  829 
  830         if (!new_setting)
  831                 return -EINVAL;
  832 
  833         if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
  834                 return -EFAULT;
  835 
  836         if (!timespec_valid(&new_spec.it_interval) ||
  837             !timespec_valid(&new_spec.it_value))
  838                 return -EINVAL;
  839 retry:
  840         timr = lock_timer(timer_id, &flag);
  841         if (!timr)
  842                 return -EINVAL;
  843 
  844         kc = clockid_to_kclock(timr->it_clock);
  845         if (WARN_ON_ONCE(!kc || !kc->timer_set))
  846                 error = -EINVAL;
  847         else
  848                 error = kc->timer_set(timr, flags, &new_spec, rtn);
  849 
  850         unlock_timer(timr, flag);
  851         if (error == TIMER_RETRY) {
   852                 rtn = NULL;     /* We already got the old time... */
  853                 goto retry;
  854         }
  855 
  856         if (old_setting && !error &&
  857             copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
  858                 error = -EFAULT;
  859 
  860         return error;
  861 }
  862 
  863 static int common_timer_del(struct k_itimer *timer)
  864 {
  865         timer->it.real.interval.tv64 = 0;
  866 
  867         if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
  868                 return TIMER_RETRY;
  869         return 0;
  870 }
  871 
  872 static inline int timer_delete_hook(struct k_itimer *timer)
  873 {
  874         struct k_clock *kc = clockid_to_kclock(timer->it_clock);
  875 
  876         if (WARN_ON_ONCE(!kc || !kc->timer_del))
  877                 return -EINVAL;
  878         return kc->timer_del(timer);
  879 }
  880 
  881 /* Delete a POSIX.1b interval timer. */
  882 SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
  883 {
  884         struct k_itimer *timer;
  885         unsigned long flags;
  886 
  887 retry_delete:
  888         timer = lock_timer(timer_id, &flags);
  889         if (!timer)
  890                 return -EINVAL;
  891 
  892         if (timer_delete_hook(timer) == TIMER_RETRY) {
  893                 unlock_timer(timer, flags);
  894                 goto retry_delete;
  895         }
  896 
  897         spin_lock(&current->sighand->siglock);
  898         list_del(&timer->list);
  899         spin_unlock(&current->sighand->siglock);
  900         /*
  901          * This keeps any tasks waiting on the spin lock from thinking
  902          * they got something (see the lock code above).
  903          */
  904         timer->it_signal = NULL;
  905 
  906         unlock_timer(timer, flags);
  907         release_posix_timer(timer, IT_ID_SET);
  908         return 0;
  909 }
  910 
   911 /*
   912  * Delete a timer owned by the process.  Used by exit_itimers.
   913  */
  914 static void itimer_delete(struct k_itimer *timer)
  915 {
  916         unsigned long flags;
  917 
  918 retry_delete:
  919         spin_lock_irqsave(&timer->it_lock, flags);
  920 
  921         if (timer_delete_hook(timer) == TIMER_RETRY) {
  922                 unlock_timer(timer, flags);
  923                 goto retry_delete;
  924         }
  925         list_del(&timer->list);
  926         /*
  927          * This keeps any tasks waiting on the spin lock from thinking
  928          * they got something (see the lock code above).
  929          */
  930         timer->it_signal = NULL;
  931 
  932         unlock_timer(timer, flags);
  933         release_posix_timer(timer, IT_ID_SET);
  934 }
  935 
  936 /*
  937  * This is called by do_exit or de_thread, only when there are no more
  938  * references to the shared signal_struct.
  939  */
  940 void exit_itimers(struct signal_struct *sig)
  941 {
  942         struct k_itimer *tmr;
  943 
  944         while (!list_empty(&sig->posix_timers)) {
  945                 tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
  946                 itimer_delete(tmr);
  947         }
  948 }
  949 
  950 SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
  951                 const struct timespec __user *, tp)
  952 {
  953         struct k_clock *kc = clockid_to_kclock(which_clock);
  954         struct timespec new_tp;
  955 
  956         if (!kc || !kc->clock_set)
  957                 return -EINVAL;
  958 
  959         if (copy_from_user(&new_tp, tp, sizeof (*tp)))
  960                 return -EFAULT;
  961 
  962         return kc->clock_set(which_clock, &new_tp);
  963 }
  964 
  965 SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
  966                 struct timespec __user *,tp)
  967 {
  968         struct k_clock *kc = clockid_to_kclock(which_clock);
  969         struct timespec kernel_tp;
  970         int error;
  971 
  972         if (!kc)
  973                 return -EINVAL;
  974 
  975         error = kc->clock_get(which_clock, &kernel_tp);
  976 
  977         if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
  978                 error = -EFAULT;
  979 
  980         return error;
  981 }
  982 
  983 SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
  984                 struct timex __user *, utx)
  985 {
  986         struct k_clock *kc = clockid_to_kclock(which_clock);
  987         struct timex ktx;
  988         int err;
  989 
  990         if (!kc)
  991                 return -EINVAL;
  992         if (!kc->clock_adj)
  993                 return -EOPNOTSUPP;
  994 
  995         if (copy_from_user(&ktx, utx, sizeof(ktx)))
  996                 return -EFAULT;
  997 
  998         err = kc->clock_adj(which_clock, &ktx);
  999 
 1000         if (!err && copy_to_user(utx, &ktx, sizeof(ktx)))
 1001                 return -EFAULT;
 1002 
 1003         return err;
 1004 }
 1005 
 1006 SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
 1007                 struct timespec __user *, tp)
 1008 {
 1009         struct k_clock *kc = clockid_to_kclock(which_clock);
 1010         struct timespec rtn_tp;
 1011         int error;
 1012 
 1013         if (!kc)
 1014                 return -EINVAL;
 1015 
 1016         error = kc->clock_getres(which_clock, &rtn_tp);
 1017 
 1018         if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
 1019                 error = -EFAULT;
 1020 
 1021         return error;
 1022 }
 1023 
 1024 /*
 1025  * nanosleep for monotonic and realtime clocks
 1026  */
 1027 static int common_nsleep(const clockid_t which_clock, int flags,
 1028                          struct timespec *tsave, struct timespec __user *rmtp)
 1029 {
 1030         return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
 1031                                  HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
 1032                                  which_clock);
 1033 }
 1034 
 1035 SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
 1036                 const struct timespec __user *, rqtp,
 1037                 struct timespec __user *, rmtp)
 1038 {
 1039         struct k_clock *kc = clockid_to_kclock(which_clock);
 1040         struct timespec t;
 1041 
 1042         if (!kc)
 1043                 return -EINVAL;
 1044         if (!kc->nsleep)
 1045                 return -ENANOSLEEP_NOTSUP;
 1046 
 1047         if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
 1048                 return -EFAULT;
 1049 
 1050         if (!timespec_valid(&t))
 1051                 return -EINVAL;
 1052 
 1053         return kc->nsleep(which_clock, flags, &t, rmtp);
 1054 }
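
With TIMER_ABSTIME the deadline is absolute, which makes signal-safe retry
loops trivial.  A small sketch; note that clock_nanosleep() returns the
error number directly rather than setting errno:

        #include <time.h>

        static void sleep_until(const struct timespec *deadline)
        {
                while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
                                       deadline, NULL) != 0)
                        ;       /* EINTR: retry; the deadline has not moved */
        }
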
 1055 
 1056 /*
 1057  * This will restart clock_nanosleep. This is required only by
 1058  * compat_clock_nanosleep_restart for now.
 1059  */
 1060 long clock_nanosleep_restart(struct restart_block *restart_block)
 1061 {
 1062         clockid_t which_clock = restart_block->nanosleep.clockid;
 1063         struct k_clock *kc = clockid_to_kclock(which_clock);
 1064 
 1065         if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
 1066                 return -EINVAL;
 1067 
 1068         return kc->nsleep_restart(restart_block);
 1069 }
