FreeBSD/Linux Kernel Cross Reference
sys/kernel/relay.c


    1 /*
    2  * Public API and common code for kernel->userspace relay file support.
    3  *
    4  * See Documentation/filesystems/relay.txt for an overview.
    5  *
    6  * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
    7  * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
    8  *
    9  * Moved to kernel/relay.c by Paul Mundt, 2006.
   10  * November 2006 - CPU hotplug support by Mathieu Desnoyers
   11  *      (mathieu.desnoyers@polymtl.ca)
   12  *
   13  * This file is released under the GPL.
   14  */
   15 #include <linux/errno.h>
   16 #include <linux/stddef.h>
   17 #include <linux/slab.h>
   18 #include <linux/export.h>
   19 #include <linux/string.h>
   20 #include <linux/relay.h>
   21 #include <linux/vmalloc.h>
   22 #include <linux/mm.h>
   23 #include <linux/cpu.h>
   24 #include <linux/splice.h>
   25 
   26 /* list of open channels, for cpu hotplug */
   27 static DEFINE_MUTEX(relay_channels_mutex);
   28 static LIST_HEAD(relay_channels);
   29 
   30 /*
   31  * close() vm_op implementation for relay file mapping.
   32  */
   33 static void relay_file_mmap_close(struct vm_area_struct *vma)
   34 {
   35         struct rchan_buf *buf = vma->vm_private_data;
   36         buf->chan->cb->buf_unmapped(buf, vma->vm_file);
   37 }
   38 
   39 /*
   40  * fault() vm_op implementation for relay file mapping.
   41  */
   42 static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
   43 {
   44         struct page *page;
   45         struct rchan_buf *buf = vma->vm_private_data;
   46         pgoff_t pgoff = vmf->pgoff;
   47 
   48         if (!buf)
   49                 return VM_FAULT_OOM;
   50 
   51         page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT));
   52         if (!page)
   53                 return VM_FAULT_SIGBUS;
   54         get_page(page);
   55         vmf->page = page;
   56 
   57         return 0;
   58 }
   59 
   60 /*
   61  * vm_ops for relay file mappings.
   62  */
   63 static const struct vm_operations_struct relay_file_mmap_ops = {
   64         .fault = relay_buf_fault,
   65         .close = relay_file_mmap_close,
   66 };
   67 
   68 /*
   69  * allocate an array of pointers of struct page
   70  */
   71 static struct page **relay_alloc_page_array(unsigned int n_pages)
   72 {
   73         const size_t pa_size = n_pages * sizeof(struct page *);
   74         if (pa_size > PAGE_SIZE)
   75                 return vzalloc(pa_size);
   76         return kzalloc(pa_size, GFP_KERNEL);
   77 }
   78 
   79 /*
   80  * free an array of pointers of struct page
   81  */
   82 static void relay_free_page_array(struct page **array)
   83 {
   84         if (is_vmalloc_addr(array))
   85                 vfree(array);
   86         else
   87                 kfree(array);
   88 }
   89 
   90 /**
    91  *      relay_mmap_buf - mmap channel buffer to process address space
   92  *      @buf: relay channel buffer
   93  *      @vma: vm_area_struct describing memory to be mapped
   94  *
   95  *      Returns 0 if ok, negative on error
   96  *
   97  *      Caller should already have grabbed mmap_sem.
   98  */
   99 static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
  100 {
  101         unsigned long length = vma->vm_end - vma->vm_start;
  102         struct file *filp = vma->vm_file;
  103 
  104         if (!buf)
  105                 return -EBADF;
  106 
  107         if (length != (unsigned long)buf->chan->alloc_size)
  108                 return -EINVAL;
  109 
  110         vma->vm_ops = &relay_file_mmap_ops;
  111         vma->vm_flags |= VM_DONTEXPAND;
  112         vma->vm_private_data = buf;
  113         buf->chan->cb->buf_mapped(buf, filp);
  114 
  115         return 0;
  116 }
  117 
  118 /**
  119  *      relay_alloc_buf - allocate a channel buffer
  120  *      @buf: the buffer struct
  121  *      @size: total size of the buffer
  122  *
  123  *      Returns a pointer to the resulting buffer, %NULL if unsuccessful. The
  124  *      passed in size will get page aligned, if it isn't already.
  125  */
  126 static void *relay_alloc_buf(struct rchan_buf *buf, size_t *size)
  127 {
  128         void *mem;
  129         unsigned int i, j, n_pages;
  130 
  131         *size = PAGE_ALIGN(*size);
  132         n_pages = *size >> PAGE_SHIFT;
  133 
  134         buf->page_array = relay_alloc_page_array(n_pages);
  135         if (!buf->page_array)
  136                 return NULL;
  137 
  138         for (i = 0; i < n_pages; i++) {
  139                 buf->page_array[i] = alloc_page(GFP_KERNEL);
  140                 if (unlikely(!buf->page_array[i]))
  141                         goto depopulate;
  142                 set_page_private(buf->page_array[i], (unsigned long)buf);
  143         }
  144         mem = vmap(buf->page_array, n_pages, VM_MAP, PAGE_KERNEL);
  145         if (!mem)
  146                 goto depopulate;
  147 
  148         memset(mem, 0, *size);
  149         buf->page_count = n_pages;
  150         return mem;
  151 
  152 depopulate:
  153         for (j = 0; j < i; j++)
  154                 __free_page(buf->page_array[j]);
  155         relay_free_page_array(buf->page_array);
  156         return NULL;
  157 }
  158 
  159 /**
  160  *      relay_create_buf - allocate and initialize a channel buffer
  161  *      @chan: the relay channel
  162  *
  163  *      Returns channel buffer if successful, %NULL otherwise.
  164  */
  165 static struct rchan_buf *relay_create_buf(struct rchan *chan)
  166 {
  167         struct rchan_buf *buf;
  168 
  169         if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
  170                 return NULL;
  171 
  172         buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
  173         if (!buf)
  174                 return NULL;
  175         buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
  176         if (!buf->padding)
  177                 goto free_buf;
  178 
  179         buf->start = relay_alloc_buf(buf, &chan->alloc_size);
  180         if (!buf->start)
  181                 goto free_buf;
  182 
  183         buf->chan = chan;
  184         kref_get(&buf->chan->kref);
  185         return buf;
  186 
  187 free_buf:
  188         kfree(buf->padding);
  189         kfree(buf);
  190         return NULL;
  191 }
  192 
  193 /**
  194  *      relay_destroy_channel - free the channel struct
  195  *      @kref: target kernel reference that contains the relay channel
  196  *
  197  *      Should only be called from kref_put().
  198  */
  199 static void relay_destroy_channel(struct kref *kref)
  200 {
  201         struct rchan *chan = container_of(kref, struct rchan, kref);
  202         kfree(chan);
  203 }
  204 
  205 /**
  206  *      relay_destroy_buf - destroy an rchan_buf struct and associated buffer
  207  *      @buf: the buffer struct
  208  */
  209 static void relay_destroy_buf(struct rchan_buf *buf)
  210 {
  211         struct rchan *chan = buf->chan;
  212         unsigned int i;
  213 
  214         if (likely(buf->start)) {
  215                 vunmap(buf->start);
  216                 for (i = 0; i < buf->page_count; i++)
  217                         __free_page(buf->page_array[i]);
  218                 relay_free_page_array(buf->page_array);
  219         }
  220         chan->buf[buf->cpu] = NULL;
  221         kfree(buf->padding);
  222         kfree(buf);
  223         kref_put(&chan->kref, relay_destroy_channel);
  224 }
  225 
  226 /**
  227  *      relay_remove_buf - remove a channel buffer
  228  *      @kref: target kernel reference that contains the relay buffer
  229  *
   230  *      Removes the file from the filesystem, which also frees the
   231  *      rchan_buf struct and the channel buffer.  Should only be called from
  232  *      kref_put().
  233  */
  234 static void relay_remove_buf(struct kref *kref)
  235 {
  236         struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
  237         buf->chan->cb->remove_buf_file(buf->dentry);
  238         relay_destroy_buf(buf);
  239 }
  240 
  241 /**
  242  *      relay_buf_empty - boolean, is the channel buffer empty?
  243  *      @buf: channel buffer
  244  *
  245  *      Returns 1 if the buffer is empty, 0 otherwise.
  246  */
  247 static int relay_buf_empty(struct rchan_buf *buf)
  248 {
  249         return (buf->subbufs_produced - buf->subbufs_consumed) ? 0 : 1;
  250 }
  251 
  252 /**
  253  *      relay_buf_full - boolean, is the channel buffer full?
  254  *      @buf: channel buffer
  255  *
  256  *      Returns 1 if the buffer is full, 0 otherwise.
  257  */
  258 int relay_buf_full(struct rchan_buf *buf)
  259 {
  260         size_t ready = buf->subbufs_produced - buf->subbufs_consumed;
  261         return (ready >= buf->chan->n_subbufs) ? 1 : 0;
  262 }
  263 EXPORT_SYMBOL_GPL(relay_buf_full);
  264 
  265 /*
  266  * High-level relay kernel API and associated functions.
  267  */
  268 
  269 /*
  270  * rchan_callback implementations defining default channel behavior.  Used
  271  * in place of corresponding NULL values in client callback struct.
  272  */
  273 
  274 /*
   275  * subbuf_start() default callback.  Allows the sub-buffer switch unless the buffer is full.
  276  */
  277 static int subbuf_start_default_callback (struct rchan_buf *buf,
  278                                           void *subbuf,
  279                                           void *prev_subbuf,
  280                                           size_t prev_padding)
  281 {
  282         if (relay_buf_full(buf))
  283                 return 0;
  284 
  285         return 1;
  286 }
  287 
  288 /*
  289  * buf_mapped() default callback.  Does nothing.
  290  */
  291 static void buf_mapped_default_callback(struct rchan_buf *buf,
  292                                         struct file *filp)
  293 {
  294 }
  295 
  296 /*
  297  * buf_unmapped() default callback.  Does nothing.
  298  */
  299 static void buf_unmapped_default_callback(struct rchan_buf *buf,
  300                                           struct file *filp)
  301 {
  302 }
  303 
  304 /*
   305  * create_buf_file() default callback.  Does nothing.
  306  */
  307 static struct dentry *create_buf_file_default_callback(const char *filename,
  308                                                        struct dentry *parent,
  309                                                        umode_t mode,
  310                                                        struct rchan_buf *buf,
  311                                                        int *is_global)
  312 {
  313         return NULL;
  314 }
  315 
  316 /*
   317  * remove_buf_file() default callback.  Does nothing; returns -EINVAL.
  318  */
  319 static int remove_buf_file_default_callback(struct dentry *dentry)
  320 {
  321         return -EINVAL;
  322 }
  323 
  324 /* relay channel default callbacks */
  325 static struct rchan_callbacks default_channel_callbacks = {
  326         .subbuf_start = subbuf_start_default_callback,
  327         .buf_mapped = buf_mapped_default_callback,
  328         .buf_unmapped = buf_unmapped_default_callback,
  329         .create_buf_file = create_buf_file_default_callback,
  330         .remove_buf_file = remove_buf_file_default_callback,
  331 };
  332 
  333 /**
  334  *      wakeup_readers - wake up readers waiting on a channel
  335  *      @data: contains the channel buffer
  336  *
  337  *      This is the timer function used to defer reader waking.
  338  */
  339 static void wakeup_readers(unsigned long data)
  340 {
  341         struct rchan_buf *buf = (struct rchan_buf *)data;
  342         wake_up_interruptible(&buf->read_wait);
  343 }
  344 
  345 /**
  346  *      __relay_reset - reset a channel buffer
  347  *      @buf: the channel buffer
  348  *      @init: 1 if this is a first-time initialization
  349  *
  350  *      See relay_reset() for description of effect.
  351  */
  352 static void __relay_reset(struct rchan_buf *buf, unsigned int init)
  353 {
  354         size_t i;
  355 
  356         if (init) {
  357                 init_waitqueue_head(&buf->read_wait);
  358                 kref_init(&buf->kref);
  359                 setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
  360         } else
  361                 del_timer_sync(&buf->timer);
  362 
  363         buf->subbufs_produced = 0;
  364         buf->subbufs_consumed = 0;
  365         buf->bytes_consumed = 0;
  366         buf->finalized = 0;
  367         buf->data = buf->start;
  368         buf->offset = 0;
  369 
  370         for (i = 0; i < buf->chan->n_subbufs; i++)
  371                 buf->padding[i] = 0;
  372 
  373         buf->chan->cb->subbuf_start(buf, buf->data, NULL, 0);
  374 }
  375 
  376 /**
  377  *      relay_reset - reset the channel
  378  *      @chan: the channel
  379  *
  380  *      This has the effect of erasing all data from all channel buffers
  381  *      and restarting the channel in its initial state.  The buffers
  382  *      are not freed, so any mappings are still in effect.
  383  *
  384  *      NOTE. Care should be taken that the channel isn't actually
  385  *      being used by anything when this call is made.
  386  */
  387 void relay_reset(struct rchan *chan)
  388 {
  389         unsigned int i;
  390 
  391         if (!chan)
  392                 return;
  393 
  394         if (chan->is_global && chan->buf[0]) {
  395                 __relay_reset(chan->buf[0], 0);
  396                 return;
  397         }
  398 
  399         mutex_lock(&relay_channels_mutex);
  400         for_each_possible_cpu(i)
  401                 if (chan->buf[i])
  402                         __relay_reset(chan->buf[i], 0);
  403         mutex_unlock(&relay_channels_mutex);
  404 }
  405 EXPORT_SYMBOL_GPL(relay_reset);
  406 
  407 static inline void relay_set_buf_dentry(struct rchan_buf *buf,
  408                                         struct dentry *dentry)
  409 {
  410         buf->dentry = dentry;
  411         buf->dentry->d_inode->i_size = buf->early_bytes;
  412 }
  413 
  414 static struct dentry *relay_create_buf_file(struct rchan *chan,
  415                                             struct rchan_buf *buf,
  416                                             unsigned int cpu)
  417 {
  418         struct dentry *dentry;
  419         char *tmpname;
  420 
  421         tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
  422         if (!tmpname)
  423                 return NULL;
  424         snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
  425 
  426         /* Create file in fs */
  427         dentry = chan->cb->create_buf_file(tmpname, chan->parent,
  428                                            S_IRUSR, buf,
  429                                            &chan->is_global);
  430 
  431         kfree(tmpname);
  432 
  433         return dentry;
  434 }
  435 
  436 /*
  437  *      relay_open_buf - create a new relay channel buffer
  438  *
  439  *      used by relay_open() and CPU hotplug.
  440  */
  441 static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
  442 {
  443         struct rchan_buf *buf = NULL;
  444         struct dentry *dentry;
  445 
  446         if (chan->is_global)
  447                 return chan->buf[0];
  448 
  449         buf = relay_create_buf(chan);
  450         if (!buf)
  451                 return NULL;
  452 
  453         if (chan->has_base_filename) {
  454                 dentry = relay_create_buf_file(chan, buf, cpu);
  455                 if (!dentry)
  456                         goto free_buf;
  457                 relay_set_buf_dentry(buf, dentry);
  458         }
  459 
  460         buf->cpu = cpu;
  461         __relay_reset(buf, 1);
  462 
   463         if (chan->is_global) {
  464                 chan->buf[0] = buf;
  465                 buf->cpu = 0;
  466         }
  467 
  468         return buf;
  469 
  470 free_buf:
  471         relay_destroy_buf(buf);
  472         return NULL;
  473 }
  474 
  475 /**
  476  *      relay_close_buf - close a channel buffer
  477  *      @buf: channel buffer
  478  *
   479  *      Marks the buffer finalized and deletes any pending reader-wakeup timer.
  480  *      The channel buffer and channel buffer data structure are then freed
  481  *      automatically when the last reference is given up.
  482  */
  483 static void relay_close_buf(struct rchan_buf *buf)
  484 {
  485         buf->finalized = 1;
  486         del_timer_sync(&buf->timer);
  487         kref_put(&buf->kref, relay_remove_buf);
  488 }
  489 
  490 static void setup_callbacks(struct rchan *chan,
  491                                    struct rchan_callbacks *cb)
  492 {
  493         if (!cb) {
  494                 chan->cb = &default_channel_callbacks;
  495                 return;
  496         }
  497 
  498         if (!cb->subbuf_start)
  499                 cb->subbuf_start = subbuf_start_default_callback;
  500         if (!cb->buf_mapped)
  501                 cb->buf_mapped = buf_mapped_default_callback;
  502         if (!cb->buf_unmapped)
  503                 cb->buf_unmapped = buf_unmapped_default_callback;
  504         if (!cb->create_buf_file)
  505                 cb->create_buf_file = create_buf_file_default_callback;
  506         if (!cb->remove_buf_file)
  507                 cb->remove_buf_file = remove_buf_file_default_callback;
  508         chan->cb = cb;
  509 }
  510 
  511 /**
  512  *      relay_hotcpu_callback - CPU hotplug callback
  513  *      @nb: notifier block
  514  *      @action: hotplug action to take
  515  *      @hcpu: CPU number
  516  *
  517  *      Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
  518  */
  519 static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
  520                                 unsigned long action,
  521                                 void *hcpu)
  522 {
  523         unsigned int hotcpu = (unsigned long)hcpu;
  524         struct rchan *chan;
  525 
   526         switch (action) {
  527         case CPU_UP_PREPARE:
  528         case CPU_UP_PREPARE_FROZEN:
  529                 mutex_lock(&relay_channels_mutex);
  530                 list_for_each_entry(chan, &relay_channels, list) {
  531                         if (chan->buf[hotcpu])
  532                                 continue;
  533                         chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
   534                         if (!chan->buf[hotcpu]) {
  535                                 printk(KERN_ERR
  536                                         "relay_hotcpu_callback: cpu %d buffer "
  537                                         "creation failed\n", hotcpu);
  538                                 mutex_unlock(&relay_channels_mutex);
  539                                 return notifier_from_errno(-ENOMEM);
  540                         }
  541                 }
  542                 mutex_unlock(&relay_channels_mutex);
  543                 break;
  544         case CPU_DEAD:
  545         case CPU_DEAD_FROZEN:
   546                 /* No need to flush the cpu: will be flushed upon
  547                  * final relay_flush() call. */
  548                 break;
  549         }
  550         return NOTIFY_OK;
  551 }
  552 
  553 /**
  554  *      relay_open - create a new relay channel
  555  *      @base_filename: base name of files to create, %NULL for buffering only
  556  *      @parent: dentry of parent directory, %NULL for root directory or buffer
  557  *      @subbuf_size: size of sub-buffers
  558  *      @n_subbufs: number of sub-buffers
  559  *      @cb: client callback functions
  560  *      @private_data: user-defined data
  561  *
  562  *      Returns channel pointer if successful, %NULL otherwise.
  563  *
  564  *      Creates a channel buffer for each cpu using the sizes and
  565  *      attributes specified.  The created channel buffer files
  566  *      will be named base_filename0...base_filenameN-1.  File
  567  *      permissions will be %S_IRUSR.
  568  */
  569 struct rchan *relay_open(const char *base_filename,
  570                          struct dentry *parent,
  571                          size_t subbuf_size,
  572                          size_t n_subbufs,
  573                          struct rchan_callbacks *cb,
  574                          void *private_data)
  575 {
  576         unsigned int i;
  577         struct rchan *chan;
  578 
  579         if (!(subbuf_size && n_subbufs))
  580                 return NULL;
  581         if (subbuf_size > UINT_MAX / n_subbufs)
  582                 return NULL;
  583 
  584         chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
  585         if (!chan)
  586                 return NULL;
  587 
  588         chan->version = RELAYFS_CHANNEL_VERSION;
  589         chan->n_subbufs = n_subbufs;
  590         chan->subbuf_size = subbuf_size;
  591         chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
  592         chan->parent = parent;
  593         chan->private_data = private_data;
  594         if (base_filename) {
  595                 chan->has_base_filename = 1;
  596                 strlcpy(chan->base_filename, base_filename, NAME_MAX);
  597         }
  598         setup_callbacks(chan, cb);
  599         kref_init(&chan->kref);
  600 
  601         mutex_lock(&relay_channels_mutex);
  602         for_each_online_cpu(i) {
  603                 chan->buf[i] = relay_open_buf(chan, i);
  604                 if (!chan->buf[i])
  605                         goto free_bufs;
  606         }
  607         list_add(&chan->list, &relay_channels);
  608         mutex_unlock(&relay_channels_mutex);
  609 
  610         return chan;
  611 
  612 free_bufs:
  613         for_each_possible_cpu(i) {
  614                 if (chan->buf[i])
  615                         relay_close_buf(chan->buf[i]);
  616         }
  617 
  618         kref_put(&chan->kref, relay_destroy_channel);
  619         mutex_unlock(&relay_channels_mutex);
  620         return NULL;
  621 }
  622 EXPORT_SYMBOL_GPL(relay_open);
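
/*
 * Example (illustrative sketch, not part of this file): a minimal client
 * that backs a channel with debugfs files, assuming debugfs is available.
 * The names my_create_buf_file(), my_remove_buf_file(), my_relay_callbacks,
 * my_chan and my_dir are hypothetical; only relay_open(), the callback
 * signatures and relay_file_operations come from the relay API itself.
 */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/relay.h>

static struct rchan *my_chan;
static struct dentry *my_dir;

/* Expose each per-cpu buffer as a debugfs file using relay's file ops. */
static struct dentry *my_create_buf_file(const char *filename,
					 struct dentry *parent,
					 umode_t mode,
					 struct rchan_buf *buf,
					 int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int my_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct rchan_callbacks my_relay_callbacks = {
	.create_buf_file = my_create_buf_file,
	.remove_buf_file = my_remove_buf_file,
	/* Unset members fall back to the defaults via setup_callbacks(). */
};

static int __init my_relay_init(void)
{
	my_dir = debugfs_create_dir("my_relay", NULL);

	/* Eight 64 KiB sub-buffers per cpu; files named my_buf0..my_bufN-1. */
	my_chan = relay_open("my_buf", my_dir, 64 * 1024, 8,
			     &my_relay_callbacks, NULL);
	return my_chan ? 0 : -ENOMEM;
}
module_init(my_relay_init);
MODULE_LICENSE("GPL");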
  623 
  624 struct rchan_percpu_buf_dispatcher {
  625         struct rchan_buf *buf;
  626         struct dentry *dentry;
  627 };
  628 
  629 /* Called in atomic context. */
  630 static void __relay_set_buf_dentry(void *info)
  631 {
  632         struct rchan_percpu_buf_dispatcher *p = info;
  633 
  634         relay_set_buf_dentry(p->buf, p->dentry);
  635 }
  636 
  637 /**
  638  *      relay_late_setup_files - triggers file creation
  639  *      @chan: channel to operate on
  640  *      @base_filename: base name of files to create
  641  *      @parent: dentry of parent directory, %NULL for root directory
  642  *
  643  *      Returns 0 if successful, non-zero otherwise.
  644  *
   645  *      Used to set up files for a previously buffer-only channel.
   646  *      Useful for early in-kernel tracing, before the VFS is up, for example.
  647  */
  648 int relay_late_setup_files(struct rchan *chan,
  649                            const char *base_filename,
  650                            struct dentry *parent)
  651 {
  652         int err = 0;
  653         unsigned int i, curr_cpu;
  654         unsigned long flags;
  655         struct dentry *dentry;
  656         struct rchan_percpu_buf_dispatcher disp;
  657 
  658         if (!chan || !base_filename)
  659                 return -EINVAL;
  660 
  661         strlcpy(chan->base_filename, base_filename, NAME_MAX);
  662 
  663         mutex_lock(&relay_channels_mutex);
  664         /* Is chan already set up? */
  665         if (unlikely(chan->has_base_filename)) {
  666                 mutex_unlock(&relay_channels_mutex);
  667                 return -EEXIST;
  668         }
  669         chan->has_base_filename = 1;
  670         chan->parent = parent;
  671         curr_cpu = get_cpu();
  672         /*
  673          * The CPU hotplug notifier ran before us and created buffers with
  674          * no files associated. So it's safe to call relay_setup_buf_file()
  675          * on all currently online CPUs.
  676          */
  677         for_each_online_cpu(i) {
  678                 if (unlikely(!chan->buf[i])) {
  679                         WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
  680                         err = -EINVAL;
  681                         break;
  682                 }
  683 
  684                 dentry = relay_create_buf_file(chan, chan->buf[i], i);
  685                 if (unlikely(!dentry)) {
  686                         err = -EINVAL;
  687                         break;
  688                 }
  689 
  690                 if (curr_cpu == i) {
  691                         local_irq_save(flags);
  692                         relay_set_buf_dentry(chan->buf[i], dentry);
  693                         local_irq_restore(flags);
  694                 } else {
  695                         disp.buf = chan->buf[i];
  696                         disp.dentry = dentry;
  697                         smp_mb();
  698                         /* relay_channels_mutex must be held, so wait. */
  699                         err = smp_call_function_single(i,
  700                                                        __relay_set_buf_dentry,
  701                                                        &disp, 1);
  702                 }
  703                 if (unlikely(err))
  704                         break;
  705         }
  706         put_cpu();
  707         mutex_unlock(&relay_channels_mutex);
  708 
  709         return err;
  710 }
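
/*
 * Example (illustrative sketch, not part of this file): early tracing with a
 * buffer-only channel whose files are attached later.  my_early_chan is
 * hypothetical, and my_relay_callbacks would provide at least
 * create_buf_file()/remove_buf_file() as in the sketch after relay_open()
 * above.  The initcall levels are chosen purely for illustration.
 */
static struct rchan *my_early_chan;

static int __init my_early_trace_init(void)
{
	/* No base filename and no parent: buffers only, usable before VFS. */
	my_early_chan = relay_open(NULL, NULL, 64 * 1024, 8,
				   &my_relay_callbacks, NULL);
	return my_early_chan ? 0 : -ENOMEM;
}
early_initcall(my_early_trace_init);

static int __init my_attach_trace_files(void)
{
	/* Runs later, once debugfs (assumed here) can create the files. */
	return relay_late_setup_files(my_early_chan, "my_buf",
				      debugfs_create_dir("my_relay", NULL));
}
late_initcall(my_attach_trace_files);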
  711 
  712 /**
  713  *      relay_switch_subbuf - switch to a new sub-buffer
  714  *      @buf: channel buffer
  715  *      @length: size of current event
  716  *
  717  *      Returns either the length passed in or 0 if full.
  718  *
  719  *      Performs sub-buffer-switch tasks such as invoking callbacks,
  720  *      updating padding counts, waking up readers, etc.
  721  */
  722 size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
  723 {
  724         void *old, *new;
  725         size_t old_subbuf, new_subbuf;
  726 
  727         if (unlikely(length > buf->chan->subbuf_size))
  728                 goto toobig;
  729 
  730         if (buf->offset != buf->chan->subbuf_size + 1) {
  731                 buf->prev_padding = buf->chan->subbuf_size - buf->offset;
  732                 old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
  733                 buf->padding[old_subbuf] = buf->prev_padding;
  734                 buf->subbufs_produced++;
  735                 if (buf->dentry)
  736                         buf->dentry->d_inode->i_size +=
  737                                 buf->chan->subbuf_size -
  738                                 buf->padding[old_subbuf];
  739                 else
  740                         buf->early_bytes += buf->chan->subbuf_size -
  741                                             buf->padding[old_subbuf];
  742                 smp_mb();
  743                 if (waitqueue_active(&buf->read_wait))
  744                         /*
  745                          * Calling wake_up_interruptible() from here
  746                          * will deadlock if we happen to be logging
  747                          * from the scheduler (trying to re-grab
  748                          * rq->lock), so defer it.
  749                          */
  750                         mod_timer(&buf->timer, jiffies + 1);
  751         }
  752 
  753         old = buf->data;
  754         new_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
  755         new = buf->start + new_subbuf * buf->chan->subbuf_size;
  756         buf->offset = 0;
  757         if (!buf->chan->cb->subbuf_start(buf, new, old, buf->prev_padding)) {
  758                 buf->offset = buf->chan->subbuf_size + 1;
  759                 return 0;
  760         }
  761         buf->data = new;
  762         buf->padding[new_subbuf] = 0;
  763 
  764         if (unlikely(length + buf->offset > buf->chan->subbuf_size))
  765                 goto toobig;
  766 
  767         return length;
  768 
  769 toobig:
  770         buf->chan->last_toobig = length;
  771         return 0;
  772 }
  773 EXPORT_SYMBOL_GPL(relay_switch_subbuf);
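
/*
 * Example (illustrative sketch, not part of this file): how the write path
 * reaches relay_switch_subbuf().  Kernel clients normally log through the
 * inline helpers in <linux/relay.h>; relay_write(), shown here, copies a
 * record into the current per-cpu sub-buffer and ends up calling
 * relay_switch_subbuf() when the record does not fit in the space that
 * remains.  struct my_event and my_log_event() are hypothetical.
 */
struct my_event {
	u32	type;
	u32	data;
};

static void my_log_event(struct rchan *chan, u32 type, u32 data)
{
	struct my_event ev = { .type = type, .data = data };

	/*
	 * relay_write() disables interrupts around the copy, so it is safe
	 * to call from most contexts as long as the channel stays open.
	 */
	relay_write(chan, &ev, sizeof(ev));
}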
  774 
  775 /**
  776  *      relay_subbufs_consumed - update the buffer's sub-buffers-consumed count
  777  *      @chan: the channel
  778  *      @cpu: the cpu associated with the channel buffer to update
  779  *      @subbufs_consumed: number of sub-buffers to add to current buf's count
  780  *
  781  *      Adds to the channel buffer's consumed sub-buffer count.
  782  *      subbufs_consumed should be the number of sub-buffers newly consumed,
  783  *      not the total consumed.
  784  *
  785  *      NOTE. Kernel clients don't need to call this function if the channel
  786  *      mode is 'overwrite'.
  787  */
  788 void relay_subbufs_consumed(struct rchan *chan,
  789                             unsigned int cpu,
  790                             size_t subbufs_consumed)
  791 {
  792         struct rchan_buf *buf;
  793 
  794         if (!chan)
  795                 return;
  796 
  797         if (cpu >= NR_CPUS || !chan->buf[cpu] ||
  798                                         subbufs_consumed > chan->n_subbufs)
  799                 return;
  800 
  801         buf = chan->buf[cpu];
  802         if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
  803                 buf->subbufs_consumed = buf->subbufs_produced;
  804         else
  805                 buf->subbufs_consumed += subbufs_consumed;
  806 }
  807 EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
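
/*
 * Example (illustrative sketch, not part of this file): a client whose
 * reader consumes data via mmap() must report consumption itself, since
 * only the read()/splice_read() paths below call relay_subbufs_consumed()
 * automatically.  MY_RELAY_CONSUMED and my_buf_ioctl() are hypothetical
 * and would be wired into the client's own file_operations, because
 * relay_file_operations below provides no ioctl; filp->private_data is
 * the rchan_buf set up by relay_file_open() below.
 */
static long my_buf_ioctl(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct rchan_buf *buf = filp->private_data;

	switch (cmd) {
	case MY_RELAY_CONSUMED:
		/* arg: number of sub-buffers userspace has finished reading. */
		relay_subbufs_consumed(buf->chan, buf->cpu, arg);
		return 0;
	default:
		return -ENOTTY;
	}
}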
  808 
  809 /**
  810  *      relay_close - close the channel
  811  *      @chan: the channel
  812  *
  813  *      Closes all channel buffers and frees the channel.
  814  */
  815 void relay_close(struct rchan *chan)
  816 {
  817         unsigned int i;
  818 
  819         if (!chan)
  820                 return;
  821 
  822         mutex_lock(&relay_channels_mutex);
  823         if (chan->is_global && chan->buf[0])
  824                 relay_close_buf(chan->buf[0]);
  825         else
  826                 for_each_possible_cpu(i)
  827                         if (chan->buf[i])
  828                                 relay_close_buf(chan->buf[i]);
  829 
  830         if (chan->last_toobig)
  831                 printk(KERN_WARNING "relay: one or more items not logged "
  832                        "[item size (%Zd) > sub-buffer size (%Zd)]\n",
  833                        chan->last_toobig, chan->subbuf_size);
  834 
  835         list_del(&chan->list);
  836         kref_put(&chan->kref, relay_destroy_channel);
  837         mutex_unlock(&relay_channels_mutex);
  838 }
  839 EXPORT_SYMBOL_GPL(relay_close);
  840 
  841 /**
   842  *      relay_flush - flush the channel
  843  *      @chan: the channel
  844  *
  845  *      Flushes all channel buffers, i.e. forces buffer switch.
  846  */
  847 void relay_flush(struct rchan *chan)
  848 {
  849         unsigned int i;
  850 
  851         if (!chan)
  852                 return;
  853 
  854         if (chan->is_global && chan->buf[0]) {
  855                 relay_switch_subbuf(chan->buf[0], 0);
  856                 return;
  857         }
  858 
  859         mutex_lock(&relay_channels_mutex);
  860         for_each_possible_cpu(i)
  861                 if (chan->buf[i])
  862                         relay_switch_subbuf(chan->buf[i], 0);
  863         mutex_unlock(&relay_channels_mutex);
  864 }
  865 EXPORT_SYMBOL_GPL(relay_flush);
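
/*
 * Example (illustrative sketch, not part of this file): typical teardown
 * order.  Flushing before closing forces a final sub-buffer switch so
 * readers can see a partially filled sub-buffer before the channel goes
 * away.  my_chan and my_dir are hypothetical, continuing the earlier
 * relay_open() sketch.
 */
static void __exit my_relay_exit(void)
{
	if (my_chan) {
		relay_flush(my_chan);	/* push out the partial sub-buffer */
		relay_close(my_chan);	/* buffers freed when last ref drops */
	}
	debugfs_remove(my_dir);
}
module_exit(my_relay_exit);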
  866 
  867 /**
  868  *      relay_file_open - open file op for relay files
  869  *      @inode: the inode
  870  *      @filp: the file
  871  *
  872  *      Increments the channel buffer refcount.
  873  */
  874 static int relay_file_open(struct inode *inode, struct file *filp)
  875 {
  876         struct rchan_buf *buf = inode->i_private;
  877         kref_get(&buf->kref);
  878         filp->private_data = buf;
  879 
  880         return nonseekable_open(inode, filp);
  881 }
  882 
  883 /**
  884  *      relay_file_mmap - mmap file op for relay files
  885  *      @filp: the file
  886  *      @vma: the vma describing what to map
  887  *
  888  *      Calls upon relay_mmap_buf() to map the file into user space.
  889  */
  890 static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
  891 {
  892         struct rchan_buf *buf = filp->private_data;
  893         return relay_mmap_buf(buf, vma);
  894 }
  895 
  896 /**
  897  *      relay_file_poll - poll file op for relay files
  898  *      @filp: the file
  899  *      @wait: poll table
  900  *
   901  *      Poll implementation.
  902  */
  903 static unsigned int relay_file_poll(struct file *filp, poll_table *wait)
  904 {
  905         unsigned int mask = 0;
  906         struct rchan_buf *buf = filp->private_data;
  907 
  908         if (buf->finalized)
  909                 return POLLERR;
  910 
  911         if (filp->f_mode & FMODE_READ) {
  912                 poll_wait(filp, &buf->read_wait, wait);
  913                 if (!relay_buf_empty(buf))
  914                         mask |= POLLIN | POLLRDNORM;
  915         }
  916 
  917         return mask;
  918 }
  919 
  920 /**
  921  *      relay_file_release - release file op for relay files
  922  *      @inode: the inode
  923  *      @filp: the file
  924  *
   925  *      Decrements the channel buffer refcount, as the filesystem is
  926  *      no longer using it.
  927  */
  928 static int relay_file_release(struct inode *inode, struct file *filp)
  929 {
  930         struct rchan_buf *buf = filp->private_data;
  931         kref_put(&buf->kref, relay_remove_buf);
  932 
  933         return 0;
  934 }
  935 
  936 /*
  937  *      relay_file_read_consume - update the consumed count for the buffer
  938  */
  939 static void relay_file_read_consume(struct rchan_buf *buf,
  940                                     size_t read_pos,
  941                                     size_t bytes_consumed)
  942 {
  943         size_t subbuf_size = buf->chan->subbuf_size;
  944         size_t n_subbufs = buf->chan->n_subbufs;
  945         size_t read_subbuf;
  946 
  947         if (buf->subbufs_produced == buf->subbufs_consumed &&
  948             buf->offset == buf->bytes_consumed)
  949                 return;
  950 
  951         if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
  952                 relay_subbufs_consumed(buf->chan, buf->cpu, 1);
  953                 buf->bytes_consumed = 0;
  954         }
  955 
  956         buf->bytes_consumed += bytes_consumed;
  957         if (!read_pos)
  958                 read_subbuf = buf->subbufs_consumed % n_subbufs;
  959         else
  960                 read_subbuf = read_pos / buf->chan->subbuf_size;
  961         if (buf->bytes_consumed + buf->padding[read_subbuf] == subbuf_size) {
  962                 if ((read_subbuf == buf->subbufs_produced % n_subbufs) &&
  963                     (buf->offset == subbuf_size))
  964                         return;
  965                 relay_subbufs_consumed(buf->chan, buf->cpu, 1);
  966                 buf->bytes_consumed = 0;
  967         }
  968 }
  969 
  970 /*
  971  *      relay_file_read_avail - boolean, are there unconsumed bytes available?
  972  */
  973 static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
  974 {
  975         size_t subbuf_size = buf->chan->subbuf_size;
  976         size_t n_subbufs = buf->chan->n_subbufs;
  977         size_t produced = buf->subbufs_produced;
  978         size_t consumed = buf->subbufs_consumed;
  979 
  980         relay_file_read_consume(buf, read_pos, 0);
  981 
  982         consumed = buf->subbufs_consumed;
  983 
  984         if (unlikely(buf->offset > subbuf_size)) {
  985                 if (produced == consumed)
  986                         return 0;
  987                 return 1;
  988         }
  989 
  990         if (unlikely(produced - consumed >= n_subbufs)) {
  991                 consumed = produced - n_subbufs + 1;
  992                 buf->subbufs_consumed = consumed;
  993                 buf->bytes_consumed = 0;
  994         }
  995 
  996         produced = (produced % n_subbufs) * subbuf_size + buf->offset;
  997         consumed = (consumed % n_subbufs) * subbuf_size + buf->bytes_consumed;
  998 
  999         if (consumed > produced)
 1000                 produced += n_subbufs * subbuf_size;
 1001 
 1002         if (consumed == produced) {
 1003                 if (buf->offset == subbuf_size &&
 1004                     buf->subbufs_produced > buf->subbufs_consumed)
 1005                         return 1;
 1006                 return 0;
 1007         }
 1008 
 1009         return 1;
 1010 }
 1011 
 1012 /**
 1013  *      relay_file_read_subbuf_avail - return bytes available in sub-buffer
 1014  *      @read_pos: file read position
 1015  *      @buf: relay channel buffer
 1016  */
 1017 static size_t relay_file_read_subbuf_avail(size_t read_pos,
 1018                                            struct rchan_buf *buf)
 1019 {
 1020         size_t padding, avail = 0;
 1021         size_t read_subbuf, read_offset, write_subbuf, write_offset;
 1022         size_t subbuf_size = buf->chan->subbuf_size;
 1023 
 1024         write_subbuf = (buf->data - buf->start) / subbuf_size;
 1025         write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
 1026         read_subbuf = read_pos / subbuf_size;
 1027         read_offset = read_pos % subbuf_size;
 1028         padding = buf->padding[read_subbuf];
 1029 
 1030         if (read_subbuf == write_subbuf) {
 1031                 if (read_offset + padding < write_offset)
 1032                         avail = write_offset - (read_offset + padding);
 1033         } else
 1034                 avail = (subbuf_size - padding) - read_offset;
 1035 
 1036         return avail;
 1037 }
 1038 
 1039 /**
 1040  *      relay_file_read_start_pos - find the first available byte to read
 1041  *      @read_pos: file read position
 1042  *      @buf: relay channel buffer
 1043  *
 1044  *      If the @read_pos is in the middle of padding, return the
 1045  *      position of the first actually available byte, otherwise
 1046  *      return the original value.
 1047  */
 1048 static size_t relay_file_read_start_pos(size_t read_pos,
 1049                                         struct rchan_buf *buf)
 1050 {
 1051         size_t read_subbuf, padding, padding_start, padding_end;
 1052         size_t subbuf_size = buf->chan->subbuf_size;
 1053         size_t n_subbufs = buf->chan->n_subbufs;
 1054         size_t consumed = buf->subbufs_consumed % n_subbufs;
 1055 
 1056         if (!read_pos)
 1057                 read_pos = consumed * subbuf_size + buf->bytes_consumed;
 1058         read_subbuf = read_pos / subbuf_size;
 1059         padding = buf->padding[read_subbuf];
 1060         padding_start = (read_subbuf + 1) * subbuf_size - padding;
 1061         padding_end = (read_subbuf + 1) * subbuf_size;
 1062         if (read_pos >= padding_start && read_pos < padding_end) {
 1063                 read_subbuf = (read_subbuf + 1) % n_subbufs;
 1064                 read_pos = read_subbuf * subbuf_size;
 1065         }
 1066 
 1067         return read_pos;
 1068 }
 1069 
 1070 /**
 1071  *      relay_file_read_end_pos - return the new read position
 1072  *      @read_pos: file read position
 1073  *      @buf: relay channel buffer
 1074  *      @count: number of bytes to be read
 1075  */
 1076 static size_t relay_file_read_end_pos(struct rchan_buf *buf,
 1077                                       size_t read_pos,
 1078                                       size_t count)
 1079 {
 1080         size_t read_subbuf, padding, end_pos;
 1081         size_t subbuf_size = buf->chan->subbuf_size;
 1082         size_t n_subbufs = buf->chan->n_subbufs;
 1083 
 1084         read_subbuf = read_pos / subbuf_size;
 1085         padding = buf->padding[read_subbuf];
 1086         if (read_pos % subbuf_size + count + padding == subbuf_size)
 1087                 end_pos = (read_subbuf + 1) * subbuf_size;
 1088         else
 1089                 end_pos = read_pos + count;
 1090         if (end_pos >= subbuf_size * n_subbufs)
 1091                 end_pos = 0;
 1092 
 1093         return end_pos;
 1094 }
 1095 
 1096 /*
 1097  *      subbuf_read_actor - read up to one subbuf's worth of data
 1098  */
 1099 static int subbuf_read_actor(size_t read_start,
 1100                              struct rchan_buf *buf,
 1101                              size_t avail,
 1102                              read_descriptor_t *desc,
 1103                              read_actor_t actor)
 1104 {
 1105         void *from;
 1106         int ret = 0;
 1107 
 1108         from = buf->start + read_start;
 1109         ret = avail;
 1110         if (copy_to_user(desc->arg.buf, from, avail)) {
 1111                 desc->error = -EFAULT;
 1112                 ret = 0;
 1113         }
 1114         desc->arg.data += ret;
 1115         desc->written += ret;
 1116         desc->count -= ret;
 1117 
 1118         return ret;
 1119 }
 1120 
 1121 typedef int (*subbuf_actor_t) (size_t read_start,
 1122                                struct rchan_buf *buf,
 1123                                size_t avail,
 1124                                read_descriptor_t *desc,
 1125                                read_actor_t actor);
 1126 
 1127 /*
 1128  *      relay_file_read_subbufs - read count bytes, bridging subbuf boundaries
 1129  */
 1130 static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
 1131                                         subbuf_actor_t subbuf_actor,
 1132                                         read_actor_t actor,
 1133                                         read_descriptor_t *desc)
 1134 {
 1135         struct rchan_buf *buf = filp->private_data;
 1136         size_t read_start, avail;
 1137         int ret;
 1138 
 1139         if (!desc->count)
 1140                 return 0;
 1141 
 1142         mutex_lock(&filp->f_path.dentry->d_inode->i_mutex);
 1143         do {
 1144                 if (!relay_file_read_avail(buf, *ppos))
 1145                         break;
 1146 
 1147                 read_start = relay_file_read_start_pos(*ppos, buf);
 1148                 avail = relay_file_read_subbuf_avail(read_start, buf);
 1149                 if (!avail)
 1150                         break;
 1151 
 1152                 avail = min(desc->count, avail);
 1153                 ret = subbuf_actor(read_start, buf, avail, desc, actor);
 1154                 if (desc->error < 0)
 1155                         break;
 1156 
 1157                 if (ret) {
 1158                         relay_file_read_consume(buf, read_start, ret);
 1159                         *ppos = relay_file_read_end_pos(buf, read_start, ret);
 1160                 }
 1161         } while (desc->count && ret);
 1162         mutex_unlock(&filp->f_path.dentry->d_inode->i_mutex);
 1163 
 1164         return desc->written;
 1165 }
 1166 
 1167 static ssize_t relay_file_read(struct file *filp,
 1168                                char __user *buffer,
 1169                                size_t count,
 1170                                loff_t *ppos)
 1171 {
 1172         read_descriptor_t desc;
 1173         desc.written = 0;
 1174         desc.count = count;
 1175         desc.arg.buf = buffer;
 1176         desc.error = 0;
 1177         return relay_file_read_subbufs(filp, ppos, subbuf_read_actor,
 1178                                        NULL, &desc);
 1179 }
 1180 
 1181 static void relay_consume_bytes(struct rchan_buf *rbuf, int bytes_consumed)
 1182 {
 1183         rbuf->bytes_consumed += bytes_consumed;
 1184 
 1185         if (rbuf->bytes_consumed >= rbuf->chan->subbuf_size) {
 1186                 relay_subbufs_consumed(rbuf->chan, rbuf->cpu, 1);
 1187                 rbuf->bytes_consumed %= rbuf->chan->subbuf_size;
 1188         }
 1189 }
 1190 
 1191 static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
 1192                                    struct pipe_buffer *buf)
 1193 {
 1194         struct rchan_buf *rbuf;
 1195 
 1196         rbuf = (struct rchan_buf *)page_private(buf->page);
 1197         relay_consume_bytes(rbuf, buf->private);
 1198 }
 1199 
 1200 static const struct pipe_buf_operations relay_pipe_buf_ops = {
 1201         .can_merge = 0,
 1202         .map = generic_pipe_buf_map,
 1203         .unmap = generic_pipe_buf_unmap,
 1204         .confirm = generic_pipe_buf_confirm,
 1205         .release = relay_pipe_buf_release,
 1206         .steal = generic_pipe_buf_steal,
 1207         .get = generic_pipe_buf_get,
 1208 };
 1209 
 1210 static void relay_page_release(struct splice_pipe_desc *spd, unsigned int i)
 1211 {
 1212 }
 1213 
 1214 /*
 1215  *      subbuf_splice_actor - splice up to one subbuf's worth of data
 1216  */
 1217 static ssize_t subbuf_splice_actor(struct file *in,
 1218                                loff_t *ppos,
 1219                                struct pipe_inode_info *pipe,
 1220                                size_t len,
 1221                                unsigned int flags,
 1222                                int *nonpad_ret)
 1223 {
 1224         unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
 1225         struct rchan_buf *rbuf = in->private_data;
 1226         unsigned int subbuf_size = rbuf->chan->subbuf_size;
 1227         uint64_t pos = (uint64_t) *ppos;
 1228         uint32_t alloc_size = (uint32_t) rbuf->chan->alloc_size;
 1229         size_t read_start = (size_t) do_div(pos, alloc_size);
 1230         size_t read_subbuf = read_start / subbuf_size;
 1231         size_t padding = rbuf->padding[read_subbuf];
 1232         size_t nonpad_end = read_subbuf * subbuf_size + subbuf_size - padding;
 1233         struct page *pages[PIPE_DEF_BUFFERS];
 1234         struct partial_page partial[PIPE_DEF_BUFFERS];
 1235         struct splice_pipe_desc spd = {
 1236                 .pages = pages,
 1237                 .nr_pages = 0,
 1238                 .nr_pages_max = PIPE_DEF_BUFFERS,
 1239                 .partial = partial,
 1240                 .flags = flags,
 1241                 .ops = &relay_pipe_buf_ops,
 1242                 .spd_release = relay_page_release,
 1243         };
 1244         ssize_t ret;
 1245 
 1246         if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
 1247                 return 0;
 1248         if (splice_grow_spd(pipe, &spd))
 1249                 return -ENOMEM;
 1250 
 1251         /*
 1252          * Adjust read len, if longer than what is available
 1253          */
 1254         if (len > (subbuf_size - read_start % subbuf_size))
 1255                 len = subbuf_size - read_start % subbuf_size;
 1256 
 1257         subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT;
 1258         pidx = (read_start / PAGE_SIZE) % subbuf_pages;
 1259         poff = read_start & ~PAGE_MASK;
 1260         nr_pages = min_t(unsigned int, subbuf_pages, pipe->buffers);
 1261 
 1262         for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) {
 1263                 unsigned int this_len, this_end, private;
 1264                 unsigned int cur_pos = read_start + total_len;
 1265 
 1266                 if (!len)
 1267                         break;
 1268 
 1269                 this_len = min_t(unsigned long, len, PAGE_SIZE - poff);
 1270                 private = this_len;
 1271 
 1272                 spd.pages[spd.nr_pages] = rbuf->page_array[pidx];
 1273                 spd.partial[spd.nr_pages].offset = poff;
 1274 
 1275                 this_end = cur_pos + this_len;
 1276                 if (this_end >= nonpad_end) {
 1277                         this_len = nonpad_end - cur_pos;
 1278                         private = this_len + padding;
 1279                 }
 1280                 spd.partial[spd.nr_pages].len = this_len;
 1281                 spd.partial[spd.nr_pages].private = private;
 1282 
 1283                 len -= this_len;
 1284                 total_len += this_len;
 1285                 poff = 0;
 1286                 pidx = (pidx + 1) % subbuf_pages;
 1287 
 1288                 if (this_end >= nonpad_end) {
 1289                         spd.nr_pages++;
 1290                         break;
 1291                 }
 1292         }
 1293 
 1294         ret = 0;
 1295         if (!spd.nr_pages)
 1296                 goto out;
 1297 
 1298         ret = *nonpad_ret = splice_to_pipe(pipe, &spd);
 1299         if (ret < 0 || ret < total_len)
 1300                 goto out;
 1301 
 1302         if (read_start + ret == nonpad_end)
 1303                 ret += padding;
 1304 
 1305 out:
 1306         splice_shrink_spd(&spd);
 1307         return ret;
 1308 }
 1309 
 1310 static ssize_t relay_file_splice_read(struct file *in,
 1311                                       loff_t *ppos,
 1312                                       struct pipe_inode_info *pipe,
 1313                                       size_t len,
 1314                                       unsigned int flags)
 1315 {
 1316         ssize_t spliced;
 1317         int ret;
 1318         int nonpad_ret = 0;
 1319 
 1320         ret = 0;
 1321         spliced = 0;
 1322 
 1323         while (len && !spliced) {
 1324                 ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret);
 1325                 if (ret < 0)
 1326                         break;
 1327                 else if (!ret) {
 1328                         if (flags & SPLICE_F_NONBLOCK)
 1329                                 ret = -EAGAIN;
 1330                         break;
 1331                 }
 1332 
 1333                 *ppos += ret;
 1334                 if (ret > len)
 1335                         len = 0;
 1336                 else
 1337                         len -= ret;
 1338                 spliced += nonpad_ret;
 1339                 nonpad_ret = 0;
 1340         }
 1341 
 1342         if (spliced)
 1343                 return spliced;
 1344 
 1345         return ret;
 1346 }
 1347 
 1348 const struct file_operations relay_file_operations = {
 1349         .open           = relay_file_open,
 1350         .poll           = relay_file_poll,
 1351         .mmap           = relay_file_mmap,
 1352         .read           = relay_file_read,
 1353         .llseek         = no_llseek,
 1354         .release        = relay_file_release,
 1355         .splice_read    = relay_file_splice_read,
 1356 };
 1357 EXPORT_SYMBOL_GPL(relay_file_operations);
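
/*
 * Example (illustrative userspace sketch, not kernel code): draining one
 * per-cpu buffer file through the read()/poll() file operations above.
 * The debugfs path is an assumption carried over from the earlier sketches.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/sys/kernel/debug/my_relay/my_buf0", O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;

	/* Block until the kernel side produces data, then drain it. */
	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		while ((n = read(pfd.fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
	}
	close(pfd.fd);
	return 0;
}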
 1358 
 1359 static __init int relay_init(void)
 1360 {
 1361 
 1362         hotcpu_notifier(relay_hotcpu_callback, 0);
 1363         return 0;
 1364 }
 1365 
 1366 early_initcall(relay_init);
