FreeBSD/Linux Kernel Cross Reference
sys/kernel/softirq.c


/*
 *      linux/kernel/softirq.c
 *
 *      Copyright (C) 1992 Linus Torvalds
 *
 * Fixed a disable_bh()/enable_bh() race (which was causing a console
 * lockup) due to non-atomic handling of bh_mask_count.
 * Copyright (C) 1998  Andrea Arcangeli
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/tqueue.h>

/*
   - No shared variables; all data are CPU-local.
   - If a softirq needs serialization, let it serialize itself
     with its own spinlocks.
   - Even if a softirq is serialized, only the local CPU is marked for
     execution, so we get a sort of weak CPU binding. It is still
     unclear whether this results in better locality or not.
   - These softirqs are not masked by the global cli() and
     start_bh_atomic() (for obvious reasons). Hence, old code that
     still uses global locks MUST NOT use softirqs directly, but must
     go through interfacing routines that acquire the global locks.
     See the BH implementation, for example.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
   - Bottom halves: globally serialized, grr...
 */

irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;

static struct softirq_action softirq_vec[32] __cacheline_aligned;

/*
 * We cannot loop indefinitely here, to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * for pending events, so let the scheduler balance the softirq
 * load for us.
 */
static inline void wakeup_softirqd(unsigned cpu)
{
        struct task_struct * tsk = ksoftirqd_task(cpu);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}
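
/*
 * Run the softirq handlers pending for this CPU, with hardware
 * interrupts re-enabled while the handlers execute.  Handlers may
 * raise further softirqs; the loop is restarted only when a softirq
 * that has not yet run during this invocation becomes pending
 * ('mask' tracks which ones those are).  If only already-handled
 * softirqs were re-raised, we stop and let ksoftirqd pick them up,
 * so softirq activity cannot starve userspace.
 */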
asmlinkage void do_softirq()
{
        int cpu = smp_processor_id();
        __u32 pending;
        unsigned long flags;
        __u32 mask;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = softirq_pending(cpu);

        if (pending) {
                struct softirq_action *h;

                mask = ~pending;
                local_bh_disable();
restart:
                /* Reset the pending bitmask before enabling irqs */
                softirq_pending(cpu) = 0;

                local_irq_enable();

                h = softirq_vec;

                do {
                        if (pending & 1)
                                h->action(h);
                        h++;
                        pending >>= 1;
                } while (pending);

                local_irq_disable();

                pending = softirq_pending(cpu);
                if (pending & mask) {
                        mask &= ~pending;
                        goto restart;
                }
                __local_bh_enable();

                if (pending)
                        wakeup_softirqd(cpu);
        }

        local_irq_restore(flags);
}

/*
 * This function must run with irqs disabled!
 */
inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
        __cpu_raise_softirq(cpu, nr);

        /*
         * If we're in an interrupt or bh, we're done
         * (this also catches bh-disabled code). We will
         * actually run the softirq once we return from
         * the irq or bh.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!(local_irq_count(cpu) | local_bh_count(cpu)))
                wakeup_softirqd(cpu);
}
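
/*
 * Raise softirq 'nr' on the local CPU from any context.  This is just
 * cpu_raise_softirq() wrapped in local_irq_save()/local_irq_restore(),
 * since that helper must be called with interrupts disabled.
 */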
void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_raise_softirq(smp_processor_id(), nr);
        local_irq_restore(flags);
}
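
/*
 * Register 'action' as the handler for softirq 'nr'.  There is no
 * locking: registration is expected to happen once, at init time.  As
 * a sketch of typical use, the networking code of this kernel
 * generation registers its softirqs at init time roughly like this:
 *
 *      open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
 *      open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
 */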
void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
        softirq_vec[nr].data = data;
        softirq_vec[nr].action = action;
}


/* Tasklets */

struct tasklet_head tasklet_vec[NR_CPUS] __cacheline_aligned;
struct tasklet_head tasklet_hi_vec[NR_CPUS] __cacheline_aligned;
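
/*
 * Queue a tasklet on this CPU's list and mark TASKLET_SOFTIRQ pending.
 * Drivers normally do not call this directly: they use
 * tasklet_schedule(), which sets TASKLET_STATE_SCHED first so that a
 * tasklet is queued at most once.  A sketch of typical driver use
 * (my_handler and my_tasklet are made-up names):
 *
 *      static void my_handler(unsigned long data);
 *      static DECLARE_TASKLET(my_tasklet, my_handler, 0);
 *
 *      and then, e.g. from the hard interrupt handler:
 *
 *      tasklet_schedule(&my_tasklet);
 */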
void __tasklet_schedule(struct tasklet_struct *t)
{
        int cpu = smp_processor_id();
        unsigned long flags;

        local_irq_save(flags);
        t->next = tasklet_vec[cpu].list;
        tasklet_vec[cpu].list = t;
        cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}
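
/*
 * Like __tasklet_schedule(), but for the high-priority list: these
 * tasklets run from HI_SOFTIRQ, which do_softirq() services before
 * TASKLET_SOFTIRQ.
 */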
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        int cpu = smp_processor_id();
        unsigned long flags;

        local_irq_save(flags);
        t->next = tasklet_hi_vec[cpu].list;
        tasklet_hi_vec[cpu].list = t;
        cpu_raise_softirq(cpu, HI_SOFTIRQ);
        local_irq_restore(flags);
}
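
/*
 * TASKLET_SOFTIRQ handler: atomically detach this CPU's tasklet list,
 * then run every tasklet that is neither running on another CPU
 * (tasklet_trylock() failure) nor disabled (t->count non-zero).  Any
 * tasklet we cannot run now is put back on the list and the softirq
 * is raised again so it is retried on a later pass.
 */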
static void tasklet_action(struct softirq_action *a)
{
        int cpu = smp_processor_id();
        struct tasklet_struct *list;

        local_irq_disable();
        list = tasklet_vec[cpu].list;
        tasklet_vec[cpu].list = NULL;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = tasklet_vec[cpu].list;
                tasklet_vec[cpu].list = t;
                __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}
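
/*
 * HI_SOFTIRQ handler: identical to tasklet_action(), but operating on
 * tasklet_hi_vec.
 */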
static void tasklet_hi_action(struct softirq_action *a)
{
        int cpu = smp_processor_id();
        struct tasklet_struct *list;

        local_irq_disable();
        list = tasklet_hi_vec[cpu].list;
        tasklet_hi_vec[cpu].list = NULL;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = tasklet_hi_vec[cpu].list;
                tasklet_hi_vec[cpu].list = t;
                __cpu_raise_softirq(cpu, HI_SOFTIRQ);
                local_irq_enable();
        }
}
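
/*
 * Initialize a tasklet at run time; the DECLARE_TASKLET() macro is the
 * static equivalent.  A count of zero means the tasklet is enabled.
 */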
void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}
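
/*
 * Wait until a tasklet is neither scheduled nor running, so that it is
 * safe to free it.  This can sleep (via yield()), hence the complaint
 * if it is called from interrupt context.
 */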
void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                current->state = TASK_RUNNING;
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}


/* Old style BHs */

static void (*bh_base[32])(void);
struct tasklet_struct bh_task_vec[32];

/* BHs are serialized by the global_bh_lock spinlock.

   It would still be possible to implement synchronize_bh() as
   spin_unlock_wait(&global_bh_lock).  That operation is not used by
   the kernel at the moment, so the only thing keeping this lock from
   being private is wait_on_irq().

   The lock can be removed only after auditing all the BHs.
 */
spinlock_t global_bh_lock = SPIN_LOCK_UNLOCKED;
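
/*
 * All 32 old-style BHs run through this single tasklet handler, with
 * 'nr' identifying the BH.  The old semantics are preserved: at most
 * one BH runs at a time anywhere in the system (global_bh_lock), and
 * never while hard interrupt handling is in progress
 * (hardirq_trylock()).  If either condition cannot be met right now,
 * the BH is simply marked pending again and retried on a later
 * softirq pass.
 */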
static void bh_action(unsigned long nr)
{
        int cpu = smp_processor_id();

        if (!spin_trylock(&global_bh_lock))
                goto resched;

        if (!hardirq_trylock(cpu))
                goto resched_unlock;

        if (bh_base[nr])
                bh_base[nr]();

        hardirq_endlock(cpu);
        spin_unlock(&global_bh_lock);
        return;

resched_unlock:
        spin_unlock(&global_bh_lock);
resched:
        mark_bh(nr);
}
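
/*
 * Install 'routine' as BH number 'nr'; the mb() ensures the handler is
 * visible before anyone can mark_bh() it.  A sketch of the classic
 * usage pattern (the timer BH of this kernel generation is wired up
 * much like this):
 *
 *      init_bh(TIMER_BH, timer_bh);
 *
 *      and then, from the timer interrupt:
 *
 *      mark_bh(TIMER_BH);
 */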
void init_bh(int nr, void (*routine)(void))
{
        bh_base[nr] = routine;
        mb();
}

void remove_bh(int nr)
{
        tasklet_kill(bh_task_vec+nr);
        bh_base[nr] = NULL;
}
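
/*
 * Boot-time setup: point all 32 BH tasklets at bh_action(), with the
 * BH number as the tasklet argument, and register the two tasklet
 * softirqs.
 */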
void __init softirq_init()
{
        int i;

        for (i=0; i<32; i++)
                tasklet_init(bh_task_vec+i, bh_action, i);

        open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}
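
/*
 * Run a task queue: atomically splice the whole queue onto a private
 * list head, then call each entry's routine.  p->sync is cleared
 * (after the wmb()) before the routine is invoked, so a tq_struct may
 * be queued again from within its own handler.
 */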
void __run_task_queue(task_queue *list)
{
        struct list_head head, *next;
        unsigned long flags;

        spin_lock_irqsave(&tqueue_lock, flags);
        list_add(&head, list);
        list_del_init(list);
        spin_unlock_irqrestore(&tqueue_lock, flags);

        next = head.next;
        while (next != &head) {
                void (*f) (void *);
                struct tq_struct *p;
                void *data;

                p = list_entry(next, struct tq_struct, list);
                next = next->next;
                f = p->routine;
                data = p->data;
                wmb();
                p->sync = 0;
                if (f)
                        f(data);
        }
}
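
/*
 * Per-CPU softirq daemon.  It runs at the lowest priority (nice 19)
 * and processes whatever softirq load do_softirq() declined to handle
 * inline.  Each thread migrates itself to its CPU, publishes itself
 * through ksoftirqd_task(), and then loops: sleep while nothing is
 * pending, otherwise call do_softirq(), rescheduling whenever asked
 * to.
 */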
static int ksoftirqd(void * __bind_cpu)
{
        int bind_cpu = (int) (long) __bind_cpu;
        int cpu = cpu_logical_map(bind_cpu);

        daemonize();
        current->nice = 19;
        sigfillset(&current->blocked);

        /* Migrate to the right CPU */
        current->cpus_allowed = 1UL << cpu;
        while (smp_processor_id() != cpu)
                schedule();

        sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu);

        __set_current_state(TASK_INTERRUPTIBLE);
        mb();

        ksoftirqd_task(cpu) = current;

        for (;;) {
                if (!softirq_pending(cpu))
                        schedule();

                __set_current_state(TASK_RUNNING);

                while (softirq_pending(cpu)) {
                        do_softirq();
                        if (current->need_resched)
                                schedule();
                }

                __set_current_state(TASK_INTERRUPTIBLE);
        }
}
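
/*
 * Run from __initcall() at boot: create one ksoftirqd thread per CPU
 * and wait until each has registered itself in ksoftirqd_task()
 * before moving on to the next.
 */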
static __init int spawn_ksoftirqd(void)
{
        int cpu;

        for (cpu = 0; cpu < smp_num_cpus; cpu++) {
                if (kernel_thread(ksoftirqd, (void *) (long) cpu,
                                  CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
                        printk("spawn_ksoftirqd() failed for cpu %d\n", cpu);
                else {
                        while (!ksoftirqd_task(cpu_logical_map(cpu)))
                                yield();
                }
        }

        return 0;
}

__initcall(spawn_ksoftirqd);

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.