FreeBSD/Linux Kernel Cross Reference
sys/lib/scatterlist.c


/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:         The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
        if (sg_is_last(sg))
                return NULL;

        sg++;
        if (unlikely(sg_is_chain(sg)))
                sg = sg_chain_ptr(sg);

        return sg;
}
EXPORT_SYMBOL(sg_next);
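
/*
 * Example (not part of the original file): a minimal sketch of walking a
 * scatterlist by hand with sg_next().  The for_each_sg() helper in
 * <linux/scatterlist.h> wraps exactly this pattern; "sgl" is an assumed
 * caller-supplied list.
 */
static void example_sg_walk(struct scatterlist *sgl)
{
        struct scatterlist *sg;

        for (sg = sgl; sg; sg = sg_next(sg))
                pr_info("sg entry: offset %u, length %u\n",
                        sg->offset, sg->length);
}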

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:         The scatterlist
 *
 * Description:
 * Returns the number of entries in @sg, taking chaining into
 * account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
        int nents;
        for (nents = 0; sg; sg = sg_next(sg))
                nents++;
        return nents;
}
EXPORT_SYMBOL(sg_nents);


/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:        First entry in the scatterlist
 * @nents:      Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used sparingly: it (currently) scans the entire list
 *   to find the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one;
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
        struct scatterlist *ret = &sgl[nents - 1];
#else
        struct scatterlist *sg, *ret = NULL;
        unsigned int i;

        for_each_sg(sgl, sg, nents, i)
                ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sgl[0].sg_magic != SG_MAGIC);
        BUG_ON(!sg_is_last(ret));
#endif
        return ret;
}
EXPORT_SYMBOL(sg_last);
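
/*
 * Example (not part of the original file): a hedged sketch using sg_last()
 * to sanity-check that a flat (unchained) table built with sg_init_table()
 * terminates at entry nents - 1.  Purely illustrative; the CONFIG_DEBUG_SG
 * checks above perform a similar verification.
 */
static bool example_sg_ends_where_expected(struct scatterlist *sgl,
                                           unsigned int nents)
{
        return sg_last(sgl, nents) == &sgl[nents - 1];
}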

/**
 * sg_init_table - Initialize SG table
 * @sgl:           The SG table
 * @nents:         Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
        memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
        {
                unsigned int i;
                for (i = 0; i < nents; i++)
                        sgl[i].sg_magic = SG_MAGIC;
        }
#endif
        sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:          SG entry
 * @buf:         Virtual address for IO
 * @buflen:      IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
        sg_init_table(sg, 1);
        sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
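
/*
 * Example (not part of the original file): a minimal sketch describing one
 * kernel buffer with sg_init_one() before mapping it for DMA.  "dev" and
 * "buf" are assumed caller-supplied; dma_map_sg() lives in
 * <linux/dma-mapping.h>.
 */
static int example_map_one(struct device *dev, void *buf, unsigned int len)
{
        struct scatterlist sg;

        sg_init_one(&sg, buf, len);
        /* returns the number of mapped entries, 0 on failure */
        return dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE);
}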

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                /*
                 * Kmemleak doesn't track page allocations as they are not
                 * commonly used (in a raw form) for kernel data structures.
                 * As we chain together a list of pages and then a normal
                 * kmalloc (tracked by kmemleak), in order for that last
                 * allocation not to become decoupled (and thus a
                 * false-positive) we need to inform kmemleak of all the
                 * intermediate allocations.
                 */
                void *ptr = (void *) __get_free_page(gfp_mask);
                kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
                return ptr;
        } else
                return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                kmemleak_free(sg);
                free_page((unsigned long) sg);
        } else
                kfree(sg);
}

/**
 * __sg_free_table - Free a previously allocated sg table
 * @table:      The sg table header to use
 * @max_ents:   The maximum number of entries per single scatterlist
 * @free_fn:    Free function
 *
 *  Description:
 *    Free an sg table previously allocated and set up with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
                     sg_free_fn *free_fn)
{
        struct scatterlist *sgl, *next;

        if (unlikely(!table->sgl))
                return;

        sgl = table->sgl;
        while (table->orig_nents) {
                unsigned int alloc_size = table->orig_nents;
                unsigned int sg_size;

                /*
                 * If we have more than max_ents segments left,
                 * then assign 'next' to the sg table after the current one.
                 * sg_size is then one less than alloc size, since the last
                 * element is the chain pointer.
                 */
                if (alloc_size > max_ents) {
                        next = sg_chain_ptr(&sgl[max_ents - 1]);
                        alloc_size = max_ents;
                        sg_size = alloc_size - 1;
                } else {
                        sg_size = alloc_size;
                        next = NULL;
                }

                table->orig_nents -= sg_size;
                free_fn(sgl, alloc_size);
                sgl = next;
        }

        table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:      The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
        __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @max_ents:   The maximum number of entries the allocator returns per call
 * @gfp_mask:   GFP allocation mask
 * @alloc_fn:   Allocator to use
 *
 * Description:
 *   This function returns a @table that is @nents entries long. The
 *   allocator is defined to return scatterlist chunks of maximum size
 *   @max_ents.  Thus if @nents is bigger than @max_ents, the scatterlists
 *   will be chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                     unsigned int max_ents, gfp_t gfp_mask,
                     sg_alloc_fn *alloc_fn)
{
        struct scatterlist *sg, *prv;
        unsigned int left;

#ifndef ARCH_HAS_SG_CHAIN
        if (WARN_ON_ONCE(nents > max_ents))
                return -EINVAL;
#endif

        memset(table, 0, sizeof(*table));

        left = nents;
        prv = NULL;
        do {
                unsigned int sg_size, alloc_size = left;

                if (alloc_size > max_ents) {
                        alloc_size = max_ents;
                        sg_size = alloc_size - 1;
                } else
                        sg_size = alloc_size;

                left -= sg_size;

                sg = alloc_fn(alloc_size, gfp_mask);
                if (unlikely(!sg)) {
                        /*
                         * Adjust entry count to reflect that the last
                         * entry of the previous table won't be used for
                         * linkage.  Without this, sg_kfree() may get
                         * confused.
                         */
                        if (prv)
                                table->nents = ++table->orig_nents;

                        return -ENOMEM;
                }

                sg_init_table(sg, alloc_size);
                table->nents = table->orig_nents += sg_size;

                /*
                 * If this is the first mapping, assign the sg table header.
                 * If this is not the first mapping, chain previous part.
                 */
                if (prv)
                        sg_chain(prv, max_ents, sg);
                else
                        table->sgl = sg;

                /*
                 * If no more entries after this one, mark the end
                 */
                if (!left)
                        sg_mark_end(&sg[sg_size - 1]);

                prv = sg;
        } while (left);

        return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
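
/*
 * Example (not part of the original file): a hedged sketch of pairing
 * __sg_alloc_table() with a custom allocator.  This one simply defers to
 * kmalloc/kfree; a real user might carve chunks from a preallocated pool.
 * All "example_" names are illustrative.
 */
static struct scatterlist *example_alloc(unsigned int nents, gfp_t gfp_mask)
{
        return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void example_free_fn(struct scatterlist *sg, unsigned int nents)
{
        kfree(sg);
}

static int example_alloc_table(struct sg_table *table, unsigned int nents)
{
        int ret;

        ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
                               GFP_KERNEL, example_alloc);
        /* per the note above, failure still requires a cleanup pass */
        if (unlikely(ret))
                __sg_free_table(table, SG_MAX_SINGLE_ALLOC, example_free_fn);
        return ret;
}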

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @gfp_mask:   GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
        int ret;

        ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
                               gfp_mask, sg_kmalloc);
        if (unlikely(ret))
                __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

        return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
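
/*
 * Example (not part of the original file): allocate a table, point each
 * entry at a page, and release it.  "pages" is an assumed caller-supplied
 * array of npages page pointers; sg_set_page() comes from
 * <linux/scatterlist.h>.
 */
static int example_fill_table(struct page **pages, unsigned int npages)
{
        struct sg_table table;
        struct scatterlist *sg;
        unsigned int i;

        if (sg_alloc_table(&table, npages, GFP_KERNEL))
                return -ENOMEM;

        for_each_sg(table.sgl, sg, table.nents, i)
                sg_set_page(sg, pages[i], PAGE_SIZE, 0);

        /* ... hand table.sgl to dma_map_sg() or a driver here ... */

        sg_free_table(&table);
        return 0;
}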

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *                             an array of pages
 * @sgt:        The sg table header to use
 * @pages:      Pointer to an array of page pointers
 * @n_pages:    Number of pages in the pages array
 * @offset:     Offset from start of the first page to the start of a buffer
 * @size:       Number of valid bytes in the buffer (after offset)
 * @gfp_mask:   GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node. A user
 *    may provide an offset into the first page and the size of the valid
 *    data in the buffer specified by the page array. The returned sg table
 *    is released with sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
        struct page **pages, unsigned int n_pages,
        unsigned long offset, unsigned long size,
        gfp_t gfp_mask)
{
        unsigned int chunks;
        unsigned int i;
        unsigned int cur_page;
        int ret;
        struct scatterlist *s;

        /* compute number of contiguous chunks */
        chunks = 1;
        for (i = 1; i < n_pages; ++i)
                if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
                        ++chunks;

        ret = sg_alloc_table(sgt, chunks, gfp_mask);
        if (unlikely(ret))
                return ret;

        /* merging chunks and putting them into the scatterlist */
        cur_page = 0;
        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
                unsigned long chunk_size;
                unsigned int j;

                /* look for the end of the current chunk */
                for (j = cur_page + 1; j < n_pages; ++j)
                        if (page_to_pfn(pages[j]) !=
                            page_to_pfn(pages[j - 1]) + 1)
                                break;

                chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
                sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
                size -= chunk_size;
                offset = 0;
                cur_page = j;
        }

        return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
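
/*
 * Example (not part of the original file): building a table straight from a
 * page array.  Physically contiguous pages collapse into single entries, so
 * sgt.orig_nents may end up smaller than npages.  Names are illustrative.
 */
static int example_table_from_pages(struct page **pages, unsigned int npages,
                                    unsigned long offset, unsigned long size)
{
        struct sg_table sgt;
        int ret;

        ret = sg_alloc_table_from_pages(&sgt, pages, npages, offset, size,
                                        GFP_KERNEL);
        if (ret)
                return ret;

        /* ... use sgt.sgl, then release it ... */
        sg_free_table(&sgt);
        return 0;
}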

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iteration flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
                    unsigned int nents, unsigned int flags)
{
        memset(miter, 0, sizeof(struct sg_mapping_iter));

        miter->__sg = sgl;
        miter->__nents = nents;
        miter->__offset = 0;
        WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
        miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
        unsigned int off, len;

        /* check for end and drop resources from the last iteration */
        if (!miter->__nents)
                return false;

        sg_miter_stop(miter);

        /* get to the next sg if necessary.  __offset is adjusted by stop */
        while (miter->__offset == miter->__sg->length) {
                if (--miter->__nents) {
                        miter->__sg = sg_next(miter->__sg);
                        miter->__offset = 0;
                } else
                        return false;
        }

        /* map the next page */
        off = miter->__sg->offset + miter->__offset;
        len = miter->__sg->length - miter->__offset;

        miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
        off &= ~PAGE_MASK;
        miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
        miter->consumed = miter->length;

        if (miter->__flags & SG_MITER_ATOMIC)
                miter->addr = kmap_atomic(miter->page) + off;
        else
                miter->addr = kmap(miter->page) + off;

        return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
        WARN_ON(miter->consumed > miter->length);

        /* drop resources from the last iteration */
        if (miter->addr) {
                miter->__offset += miter->consumed;

                if (miter->__flags & SG_MITER_TO_SG)
                        flush_kernel_dcache_page(miter->page);

                if (miter->__flags & SG_MITER_ATOMIC) {
                        WARN_ON_ONCE(preemptible());
                        kunmap_atomic(miter->addr);
                } else
                        kunmap(miter->page);

                miter->page = NULL;
                miter->addr = NULL;
                miter->length = 0;
                miter->consumed = 0;
        }
}
EXPORT_SYMBOL(sg_miter_stop);
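
/*
 * Example (not part of the original file): the canonical start/next/stop
 * loop, here zeroing every byte an sg list describes.  SG_MITER_ATOMIC maps
 * with kmap_atomic(), so the loop body must not sleep.
 */
static void example_zero_sg(struct scatterlist *sgl, unsigned int nents)
{
        struct sg_mapping_iter miter;

        sg_miter_start(&miter, sgl, nents,
                       SG_MITER_ATOMIC | SG_MITER_TO_SG);
        while (sg_miter_next(&miter))
                memset(miter.addr, 0, miter.length);
        sg_miter_stop(&miter);
}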

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:                 The SG list
 * @nents:               Number of SG entries
 * @buf:                 The linear buffer to copy to or from
 * @buflen:              The number of bytes to copy
 * @to_buffer:           transfer direction (non-zero == from an sg list to a
 *                       buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
                             void *buf, size_t buflen, int to_buffer)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned long flags;
        unsigned int sg_flags = SG_MITER_ATOMIC;

        if (to_buffer)
                sg_flags |= SG_MITER_FROM_SG;
        else
                sg_flags |= SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        local_irq_save(flags);

        while (sg_miter_next(&miter) && offset < buflen) {
                unsigned int len;

                len = min(miter.length, buflen - offset);

                if (to_buffer)
                        memcpy(buf + offset, miter.addr, len);
                else
                        memcpy(miter.addr, buf + offset, len);

                offset += len;
        }

        sg_miter_stop(&miter);

        local_irq_restore(flags);
        return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:                 The SG list
 * @nents:               Number of SG entries
 * @buf:                 Where to copy from
 * @buflen:              The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                           void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}
EXPORT_SYMBOL(sg_copy_from_buffer);
  579 
  580 /**
  581  * sg_copy_to_buffer - Copy from an SG list to a linear buffer
  582  * @sgl:                 The SG list
  583  * @nents:               Number of SG entries
  584  * @buf:                 Where to copy to
  585  * @buflen:              The number of bytes to copy
  586  *
  587  * Returns the number of copied bytes.
  588  *
  589  **/
  590 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
  591                          void *buf, size_t buflen)
  592 {
  593         return sg_copy_buffer(sgl, nents, buf, buflen, 1);
  594 }
  595 EXPORT_SYMBOL(sg_copy_to_buffer);
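
/*
 * Example (not part of the original file): round-tripping data through an
 * sg list with the copy helpers.  Both return the number of bytes actually
 * copied, which can fall short when the sg list is smaller than the buffer.
 */
static bool example_roundtrip(struct scatterlist *sgl, unsigned int nents,
                              void *src, void *dst, size_t len)
{
        if (sg_copy_from_buffer(sgl, nents, src, len) != len)
                return false;
        return sg_copy_to_buffer(sgl, nents, dst, len) == len;
}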
