


FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_sglist.c


    1 /*-
    2  * Copyright (c) 2008 Yahoo!, Inc.
    3  * All rights reserved.
    4  * Written by: John Baldwin <jhb@FreeBSD.org>
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  * 3. Neither the name of the author nor the names of any co-contributors
   15  *    may be used to endorse or promote products derived from this software
   16  *    without specific prior written permission.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  */
   30 
   31 #include <sys/cdefs.h>
   32 __FBSDID("$FreeBSD$");
   33 
   34 #include <sys/param.h>
   35 #include <sys/kernel.h>
   36 #include <sys/malloc.h>
   37 #include <sys/mbuf.h>
   38 #include <sys/proc.h>
   39 #include <sys/sglist.h>
   40 #include <sys/uio.h>
   41 
   42 #include <vm/vm.h>
   43 #include <vm/pmap.h>
   44 #include <vm/vm_map.h>
   45 
   46 #include <sys/ktr.h>
   47 
   48 static MALLOC_DEFINE(M_SGLIST, "sglist", "scatter/gather lists");
   49 
   50 /*
   51  * Convenience macros to save the state of an sglist so it can be restored
    52  * if an append attempt fails.  Since sglists only grow, we only need to
   53  * save the current count of segments and the length of the ending segment.
   54  * Earlier segments will not be changed by an append, and the only change
   55  * that can occur to the ending segment is that it can be extended.
   56  */
   57 struct sgsave {
   58         u_short sg_nseg;
   59         size_t ss_len;
   60 };
   61 
   62 #define SGLIST_SAVE(sg, sgsave) do {                                    \
   63         (sgsave).sg_nseg = (sg)->sg_nseg;                               \
   64         if ((sgsave).sg_nseg > 0)                                       \
   65                 (sgsave).ss_len = (sg)->sg_segs[(sgsave).sg_nseg - 1].ss_len; \
   66         else                                                            \
   67                 (sgsave).ss_len = 0;                                    \
   68 } while (0)
   69 
   70 #define SGLIST_RESTORE(sg, sgsave) do {                                 \
   71         (sg)->sg_nseg = (sgsave).sg_nseg;                               \
   72         if ((sgsave).sg_nseg > 0)                                       \
   73                 (sg)->sg_segs[(sgsave).sg_nseg - 1].ss_len = (sgsave).ss_len; \
   74 } while (0)
   75 
   76 /*
    77  * Append a single (paddr, len) range to an sglist.  'sg' is the list and
    78  * '*ssp' points to the last segment currently in use.  If we run out of
    79  * segments, then EFBIG is returned.
   80  */
   81 static __inline int
   82 _sglist_append_range(struct sglist *sg, struct sglist_seg **ssp,
   83     vm_paddr_t paddr, size_t len)
   84 {
   85         struct sglist_seg *ss;
   86 
   87         ss = *ssp;
   88         if (ss->ss_paddr + ss->ss_len == paddr)
   89                 ss->ss_len += len;
   90         else {
   91                 if (sg->sg_nseg == sg->sg_maxseg)
   92                         return (EFBIG);
   93                 ss++;
   94                 ss->ss_paddr = paddr;
   95                 ss->ss_len = len;
   96                 sg->sg_nseg++;
   97                 *ssp = ss;
   98         }
   99         return (0);
  100 }
  101 
  102 /*
  103  * Worker routine to append a virtual address range (either kernel or
  104  * user) to a scatter/gather list.
  105  */
  106 static __inline int
  107 _sglist_append_buf(struct sglist *sg, void *buf, size_t len, pmap_t pmap,
  108     size_t *donep)
  109 {
  110         struct sglist_seg *ss;
  111         vm_offset_t vaddr, offset;
  112         vm_paddr_t paddr;
  113         size_t seglen;
  114         int error;
  115 
  116         if (donep)
  117                 *donep = 0;
  118         if (len == 0)
  119                 return (0);
  120 
  121         /* Do the first page.  It may have an offset. */
  122         vaddr = (vm_offset_t)buf;
  123         offset = vaddr & PAGE_MASK;
  124         if (pmap != NULL)
  125                 paddr = pmap_extract(pmap, vaddr);
  126         else
  127                 paddr = pmap_kextract(vaddr);
  128         seglen = MIN(len, PAGE_SIZE - offset);
  129         if (sg->sg_nseg == 0) {
  130                 ss = sg->sg_segs;
  131                 ss->ss_paddr = paddr;
  132                 ss->ss_len = seglen;
  133                 sg->sg_nseg = 1;
  134         } else {
  135                 ss = &sg->sg_segs[sg->sg_nseg - 1];
  136                 error = _sglist_append_range(sg, &ss, paddr, seglen);
  137                 if (error)
  138                         return (error);
  139         }
  140         vaddr += seglen;
  141         len -= seglen;
  142         if (donep)
  143                 *donep += seglen;
  144 
  145         while (len > 0) {
  146                 seglen = MIN(len, PAGE_SIZE);
  147                 if (pmap != NULL)
  148                         paddr = pmap_extract(pmap, vaddr);
  149                 else
  150                         paddr = pmap_kextract(vaddr);
  151                 error = _sglist_append_range(sg, &ss, paddr, seglen);
  152                 if (error)
  153                         return (error);
  154                 vaddr += seglen;
  155                 len -= seglen;
  156                 if (donep)
  157                         *donep += seglen;
  158         }
  159 
  160         return (0);
  161 }
  162 
  163 /*
  164  * Determine the number of scatter/gather list elements needed to
  165  * describe a kernel virtual address range.
  166  */
  167 int
  168 sglist_count(void *buf, size_t len)
  169 {
  170         vm_offset_t vaddr, vendaddr;
  171         vm_paddr_t lastaddr, paddr;
  172         int nsegs;
  173 
  174         if (len == 0)
  175                 return (0);
  176 
  177         vaddr = trunc_page((vm_offset_t)buf);
  178         vendaddr = (vm_offset_t)buf + len;
  179         nsegs = 1;
  180         lastaddr = pmap_kextract(vaddr);
  181         vaddr += PAGE_SIZE;
  182         while (vaddr < vendaddr) {
  183                 paddr = pmap_kextract(vaddr);
  184                 if (lastaddr + PAGE_SIZE != paddr)
  185                         nsegs++;
  186                 lastaddr = paddr;
  187                 vaddr += PAGE_SIZE;
  188         }
  189         return (nsegs);
  190 }
  191 
  192 /*
  193  * Allocate a scatter/gather list along with 'nsegs' segments.  The
   194  * 'mflags' parameter is the same as passed to malloc(9).  The caller
  195  * should use sglist_free() to free this list.
  196  */
  197 struct sglist *
  198 sglist_alloc(int nsegs, int mflags)
  199 {
  200         struct sglist *sg;
  201 
  202         sg = malloc(sizeof(struct sglist) + nsegs * sizeof(struct sglist_seg),
  203             M_SGLIST, mflags);
  204         if (sg == NULL)
  205                 return (NULL);
  206         sglist_init(sg, nsegs, (struct sglist_seg *)(sg + 1));
  207         return (sg);
  208 }
  209 
  210 /*
   211  * Free a scatter/gather list allocated via sglist_alloc().
  212  */
  213 void
  214 sglist_free(struct sglist *sg)
  215 {
  216 
  217         if (refcount_release(&sg->sg_refs))
  218                 free(sg, M_SGLIST);
  219 }
  220 
  221 /*
  222  * Append the segments to describe a single kernel virtual address
  223  * range to a scatter/gather list.  If there are insufficient
  224  * segments, then this fails with EFBIG.
  225  */
  226 int
  227 sglist_append(struct sglist *sg, void *buf, size_t len)
  228 {
  229         struct sgsave save;
  230         int error;
  231 
  232         if (sg->sg_maxseg == 0)
  233                 return (EINVAL);
  234         SGLIST_SAVE(sg, save);
  235         error = _sglist_append_buf(sg, buf, len, NULL, NULL);
  236         if (error)
  237                 SGLIST_RESTORE(sg, save);
  238         return (error);
  239 }
  240 
  241 /*
  242  * Append a single physical address range to a scatter/gather list.
  243  * If there are insufficient segments, then this fails with EFBIG.
  244  */
  245 int
  246 sglist_append_phys(struct sglist *sg, vm_paddr_t paddr, size_t len)
  247 {
  248         struct sglist_seg *ss;
  249         struct sgsave save;
  250         int error;
  251 
  252         if (sg->sg_maxseg == 0)
  253                 return (EINVAL);
  254         if (len == 0)
  255                 return (0);
  256 
  257         if (sg->sg_nseg == 0) {
  258                 sg->sg_segs[0].ss_paddr = paddr;
  259                 sg->sg_segs[0].ss_len = len;
  260                 sg->sg_nseg = 1;
  261                 return (0);
  262         }
  263         ss = &sg->sg_segs[sg->sg_nseg - 1];
  264         SGLIST_SAVE(sg, save);
  265         error = _sglist_append_range(sg, &ss, paddr, len);
  266         if (error)
  267                 SGLIST_RESTORE(sg, save);
  268         return (error);
  269 }
  270 
  271 /*
  272  * Append the segments that describe a single mbuf chain to a
  273  * scatter/gather list.  If there are insufficient segments, then this
  274  * fails with EFBIG.
  275  */
  276 int
  277 sglist_append_mbuf(struct sglist *sg, struct mbuf *m0)
  278 {
  279         struct sgsave save;
  280         struct mbuf *m;
  281         int error;
  282 
  283         if (sg->sg_maxseg == 0)
  284                 return (EINVAL);
  285 
  286         error = 0;
  287         SGLIST_SAVE(sg, save);
  288         for (m = m0; m != NULL; m = m->m_next) {
  289                 if (m->m_len > 0) {
  290                         error = sglist_append(sg, m->m_data, m->m_len);
  291                         if (error) {
  292                                 SGLIST_RESTORE(sg, save);
  293                                 return (error);
  294                         }
  295                 }
  296         }
  297         return (0);
  298 }
  299 
  300 /*
  301  * Append the segments that describe a single user address range to a
  302  * scatter/gather list.  If there are insufficient segments, then this
  303  * fails with EFBIG.
  304  */
  305 int
  306 sglist_append_user(struct sglist *sg, void *buf, size_t len, struct thread *td)
  307 {
  308         struct sgsave save;
  309         int error;
  310 
  311         if (sg->sg_maxseg == 0)
  312                 return (EINVAL);
  313         SGLIST_SAVE(sg, save);
  314         error = _sglist_append_buf(sg, buf, len,
  315             vmspace_pmap(td->td_proc->p_vmspace), NULL);
  316         if (error)
  317                 SGLIST_RESTORE(sg, save);
  318         return (error);
  319 }
  320 
  321 /*
  322  * Append the segments that describe a single uio to a scatter/gather
  323  * list.  If there are insufficient segments, then this fails with
  324  * EFBIG.
  325  */
  326 int
  327 sglist_append_uio(struct sglist *sg, struct uio *uio)
  328 {
  329         struct iovec *iov;
  330         struct sgsave save;
  331         size_t resid, minlen;
  332         pmap_t pmap;
  333         int error, i;
  334 
  335         if (sg->sg_maxseg == 0)
  336                 return (EINVAL);
  337 
  338         resid = uio->uio_resid;
  339         iov = uio->uio_iov;
  340 
  341         if (uio->uio_segflg == UIO_USERSPACE) {
  342                 KASSERT(uio->uio_td != NULL,
  343                     ("sglist_append_uio: USERSPACE but no thread"));
  344                 pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
  345         } else
  346                 pmap = NULL;
  347 
  348         error = 0;
  349         SGLIST_SAVE(sg, save);
  350         for (i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
  351                 /*
  352                  * Now at the first iovec to load.  Load each iovec
  353                  * until we have exhausted the residual count.
  354                  */
  355                 minlen = MIN(resid, iov[i].iov_len);
  356                 if (minlen > 0) {
  357                         error = _sglist_append_buf(sg, iov[i].iov_base, minlen,
  358                             pmap, NULL);
  359                         if (error) {
  360                                 SGLIST_RESTORE(sg, save);
  361                                 return (error);
  362                         }
  363                         resid -= minlen;
  364                 }
  365         }
  366         return (0);
  367 }
  368 
  369 /*
  370  * Append the segments that describe at most 'resid' bytes from a
  371  * single uio to a scatter/gather list.  If there are insufficient
  372  * segments, then only the amount that fits is appended.
  373  */
  374 int
  375 sglist_consume_uio(struct sglist *sg, struct uio *uio, size_t resid)
  376 {
  377         struct iovec *iov;
  378         size_t done;
  379         pmap_t pmap;
  380         int error, len;
  381 
  382         if (sg->sg_maxseg == 0)
  383                 return (EINVAL);
  384 
  385         if (uio->uio_segflg == UIO_USERSPACE) {
  386                 KASSERT(uio->uio_td != NULL,
  387                     ("sglist_consume_uio: USERSPACE but no thread"));
  388                 pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
  389         } else
  390                 pmap = NULL;
  391 
  392         error = 0;
  393         while (resid > 0 && uio->uio_resid) {
  394                 iov = uio->uio_iov;
  395                 len = iov->iov_len;
  396                 if (len == 0) {
  397                         uio->uio_iov++;
  398                         uio->uio_iovcnt--;
  399                         continue;
  400                 }
  401                 if (len > resid)
  402                         len = resid;
  403 
  404                 /*
  405                  * Try to append this iovec.  If we run out of room,
  406                  * then break out of the loop.
  407                  */
  408                 error = _sglist_append_buf(sg, iov->iov_base, len, pmap, &done);
  409                 iov->iov_base = (char *)iov->iov_base + done;
  410                 iov->iov_len -= done;
  411                 uio->uio_resid -= done;
  412                 uio->uio_offset += done;
  413                 resid -= done;
  414                 if (error)
  415                         break;
  416         }
  417         return (0);
  418 }
  419 
  420 /*
  421  * Allocate and populate a scatter/gather list to describe a single
  422  * kernel virtual address range.
  423  */
  424 struct sglist *
  425 sglist_build(void *buf, size_t len, int mflags)
  426 {
  427         struct sglist *sg;
  428         int nsegs;
  429 
  430         if (len == 0)
  431                 return (NULL);
  432 
  433         nsegs = sglist_count(buf, len);
  434         sg = sglist_alloc(nsegs, mflags);
  435         if (sg == NULL)
  436                 return (NULL);
  437         if (sglist_append(sg, buf, len) != 0) {
  438                 sglist_free(sg);
  439                 return (NULL);
  440         }
  441         return (sg);
  442 }
  443 
  444 /*
  445  * Clone a new copy of a scatter/gather list.
  446  */
  447 struct sglist *
  448 sglist_clone(struct sglist *sg, int mflags)
  449 {
  450         struct sglist *new;
  451 
  452         if (sg == NULL)
  453                 return (NULL);
  454         new = sglist_alloc(sg->sg_maxseg, mflags);
  455         if (new == NULL)
  456                 return (NULL);
  457         new->sg_nseg = sg->sg_nseg;
  458         bcopy(sg->sg_segs, new->sg_segs, sizeof(struct sglist_seg) *
  459             sg->sg_nseg);
  460         return (new);
  461 }
  462 
  463 /*
  464  * Calculate the total length of the segments described in a
  465  * scatter/gather list.
  466  */
  467 size_t
  468 sglist_length(struct sglist *sg)
  469 {
  470         size_t space;
  471         int i;
  472 
  473         space = 0;
  474         for (i = 0; i < sg->sg_nseg; i++)
  475                 space += sg->sg_segs[i].ss_len;
  476         return (space);
  477 }
  478 
  479 /*
  480  * Split a scatter/gather list into two lists.  The scatter/gather
  481  * entries for the first 'length' bytes of the 'original' list are
  482  * stored in the '*head' list and are removed from 'original'.
  483  *
  484  * If '*head' is NULL, then a new list will be allocated using
  485  * 'mflags'.  If M_NOWAIT is specified and the allocation fails,
  486  * ENOMEM will be returned.
  487  *
  488  * If '*head' is not NULL, it should point to an empty sglist.  If it
  489  * does not have enough room for the remaining space, then EFBIG will
  490  * be returned.  If '*head' is not empty, then EINVAL will be
  491  * returned.
  492  *
  493  * If 'original' is shared (refcount > 1), then EDOOFUS will be
  494  * returned.
  495  */
  496 int
  497 sglist_split(struct sglist *original, struct sglist **head, size_t length,
  498     int mflags)
  499 {
  500         struct sglist *sg;
  501         size_t space, split;
  502         int count, i;
  503 
  504         if (original->sg_refs > 1)
  505                 return (EDOOFUS);
  506 
  507         /* Figure out how big of a sglist '*head' has to hold. */
  508         count = 0;
  509         space = 0;
  510         split = 0;
  511         for (i = 0; i < original->sg_nseg; i++) {
  512                 space += original->sg_segs[i].ss_len;
  513                 count++;
  514                 if (space >= length) {
  515                         /*
  516                          * If 'length' falls in the middle of a
  517                          * scatter/gather list entry, then 'split'
  518                          * holds how much of that entry will remain in
  519                          * 'original'.
  520                          */
  521                         split = space - length;
  522                         break;
  523                 }
  524         }
  525 
  526         /* Nothing to do, so leave head empty. */
  527         if (count == 0)
  528                 return (0);
  529 
  530         if (*head == NULL) {
  531                 sg = sglist_alloc(count, mflags);
  532                 if (sg == NULL)
  533                         return (ENOMEM);
  534                 *head = sg;
  535         } else {
  536                 sg = *head;
  537                 if (sg->sg_maxseg < count)
  538                         return (EFBIG);
  539                 if (sg->sg_nseg != 0)
  540                         return (EINVAL);
  541         }
  542 
  543         /* Copy 'count' entries to 'sg' from 'original'. */
  544         bcopy(original->sg_segs, sg->sg_segs, count *
  545             sizeof(struct sglist_seg));
  546         sg->sg_nseg = count;
  547 
  548         /*
  549          * If we had to split a list entry, fixup the last entry in
  550          * 'sg' and the new first entry in 'original'.  We also
  551          * decrement 'count' by 1 since we will only be removing
  552          * 'count - 1' segments from 'original' now.
  553          */
  554         if (split != 0) {
  555                 count--;
  556                 sg->sg_segs[count].ss_len -= split;
  557                 original->sg_segs[count].ss_paddr =
  558                     sg->sg_segs[count].ss_paddr + split;
  559                 original->sg_segs[count].ss_len = split;
  560         }
  561 
  562         /* Trim 'count' entries from the front of 'original'. */
  563         original->sg_nseg -= count;
   564         bcopy(original->sg_segs + count, original->sg_segs,
   565             original->sg_nseg * sizeof(struct sglist_seg));
  566         return (0);
  567 }
  568 
  569 /*
  570  * Append the scatter/gather list elements in 'second' to the
  571  * scatter/gather list 'first'.  If there is not enough space in
  572  * 'first', EFBIG is returned.
  573  */
  574 int
  575 sglist_join(struct sglist *first, struct sglist *second)
  576 {
  577         struct sglist_seg *flast, *sfirst;
  578         int append;
  579 
  580         /* If 'second' is empty, there is nothing to do. */
  581         if (second->sg_nseg == 0)
  582                 return (0);
  583 
  584         /*
  585          * If the first entry in 'second' can be appended to the last entry
  586          * in 'first' then set append to '1'.
  587          */
  588         append = 0;
  589         flast = &first->sg_segs[first->sg_nseg - 1];
  590         sfirst = &second->sg_segs[0];
  591         if (first->sg_nseg != 0 &&
  592             flast->ss_paddr + flast->ss_len == sfirst->ss_paddr)
  593                 append = 1;
  594 
  595         /* Make sure 'first' has enough room. */
  596         if (first->sg_nseg + second->sg_nseg - append > first->sg_maxseg)
  597                 return (EFBIG);
  598 
  599         /* Merge last in 'first' and first in 'second' if needed. */
  600         if (append)
  601                 flast->ss_len += sfirst->ss_len;
  602 
  603         /* Append new segments from 'second' to 'first'. */
   604         bcopy(second->sg_segs + append, first->sg_segs + first->sg_nseg,
  605             (second->sg_nseg - append) * sizeof(struct sglist_seg));
  606         first->sg_nseg += second->sg_nseg - append;
  607         sglist_reset(second);
  608         return (0);
  609 }
  610 
  611 /*
  612  * Generate a new scatter/gather list from a range of an existing
  613  * scatter/gather list.  The 'offset' and 'length' parameters specify
  614  * the logical range of the 'original' list to extract.  If that range
  615  * is not a subset of the length of 'original', then EINVAL is
  616  * returned.  The new scatter/gather list is stored in '*slice'.
  617  *
  618  * If '*slice' is NULL, then a new list will be allocated using
  619  * 'mflags'.  If M_NOWAIT is specified and the allocation fails,
  620  * ENOMEM will be returned.
  621  *
  622  * If '*slice' is not NULL, it should point to an empty sglist.  If it
  623  * does not have enough room for the remaining space, then EFBIG will
  624  * be returned.  If '*slice' is not empty, then EINVAL will be
  625  * returned.
  626  */
  627 int
  628 sglist_slice(struct sglist *original, struct sglist **slice, size_t offset,
  629     size_t length, int mflags)
  630 {
  631         struct sglist *sg;
  632         size_t space, end, foffs, loffs;
  633         int count, i, fseg;
  634 
  635         /* Nothing to do. */
  636         if (length == 0)
  637                 return (0);
  638 
  639         /* Figure out how many segments '*slice' needs to have. */
  640         end = offset + length;
  641         space = 0;
  642         count = 0;
  643         fseg = 0;
  644         foffs = loffs = 0;
  645         for (i = 0; i < original->sg_nseg; i++) {
  646                 space += original->sg_segs[i].ss_len;
  647                 if (space > offset) {
  648                         /*
  649                          * When we hit the first segment, store its index
  650                          * in 'fseg' and the offset into the first segment
  651                          * of 'offset' in 'foffs'.
  652                          */
  653                         if (count == 0) {
  654                                 fseg = i;
  655                                 foffs = offset - (space -
  656                                     original->sg_segs[i].ss_len);
  657                                 CTR1(KTR_DEV, "sglist_slice: foffs = %08lx",
  658                                     foffs);
  659                         }
  660                         count++;
  661 
  662                         /*
  663                          * When we hit the last segment, break out of
  664                          * the loop.  Store the amount of extra space
  665                          * at the end of this segment in 'loffs'.
  666                          */
  667                         if (space >= end) {
  668                                 loffs = space - end;
  669                                 CTR1(KTR_DEV, "sglist_slice: loffs = %08lx",
  670                                     loffs);
  671                                 break;
  672                         }
  673                 }
  674         }
  675 
  676         /* If we never hit 'end', then 'length' ran off the end, so fail. */
  677         if (space < end)
  678                 return (EINVAL);
  679 
  680         if (*slice == NULL) {
  681                 sg = sglist_alloc(count, mflags);
  682                 if (sg == NULL)
  683                         return (ENOMEM);
  684                 *slice = sg;
  685         } else {
  686                 sg = *slice;
  687                 if (sg->sg_maxseg < count)
  688                         return (EFBIG);
  689                 if (sg->sg_nseg != 0)
  690                         return (EINVAL);
  691         }
  692 
  693         /*
  694          * Copy over 'count' segments from 'original' starting at
  695          * 'fseg' to 'sg'.
  696          */
  697         bcopy(original->sg_segs + fseg, sg->sg_segs,
  698             count * sizeof(struct sglist_seg));
  699         sg->sg_nseg = count;
  700 
  701         /* Fixup first and last segments if needed. */
  702         if (foffs != 0) {
  703                 sg->sg_segs[0].ss_paddr += foffs;
  704                 sg->sg_segs[0].ss_len -= foffs;
  705                 CTR2(KTR_DEV, "sglist_slice seg[0]: %08lx:%08lx",
  706                     (long)sg->sg_segs[0].ss_paddr, sg->sg_segs[0].ss_len);
  707         }
  708         if (loffs != 0) {
  709                 sg->sg_segs[count - 1].ss_len -= loffs;
  710                 CTR2(KTR_DEV, "sglist_slice seg[%d]: len %08x", count - 1,
  711                     sg->sg_segs[count - 1].ss_len);
  712         }
  713         return (0);
  714 }





This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.