FreeBSD/Linux Kernel Cross Reference
sys/dev/vinum/vinumrequest.c


    1 /*-
    2  * Copyright (c) 1997, 1998, 1999
    3  *  Nan Yang Computer Services Limited.  All rights reserved.
    4  *
    5  *  Parts copyright (c) 1997, 1998 Cybernet Corporation, NetMAX project.
    6  *
    7  *  Written by Greg Lehey
    8  *
    9  *  This software is distributed under the so-called ``Berkeley
   10  *  License'':
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. All advertising materials mentioning features or use of this software
   21  *    must display the following acknowledgement:
   22  *      This product includes software developed by Nan Yang Computer
   23  *      Services Limited.
   24  * 4. Neither the name of the Company nor the names of its contributors
   25  *    may be used to endorse or promote products derived from this software
   26  *    without specific prior written permission.
   27  *
   28  * This software is provided ``as is'', and any express or implied
   29  * warranties, including, but not limited to, the implied warranties of
   30  * merchantability and fitness for a particular purpose are disclaimed.
   31  * In no event shall the company or contributors be liable for any
   32  * direct, indirect, incidental, special, exemplary, or consequential
   33  * damages (including, but not limited to, procurement of substitute
   34  * goods or services; loss of use, data, or profits; or business
   35  * interruption) however caused and on any theory of liability, whether
   36  * in contract, strict liability, or tort (including negligence or
   37  * otherwise) arising in any way out of the use of this software, even if
   38  * advised of the possibility of such damage.
   39  *
   40  * $Id: vinumrequest.c,v 1.32 2001/05/23 23:04:38 grog Exp grog $
   41  * $FreeBSD: releng/5.0/sys/dev/vinum/vinumrequest.c 106583 2002-11-07 21:52:51Z jhb $
   42  */
   43 
   44 #include <dev/vinum/vinumhdr.h>
   45 #include <dev/vinum/request.h>
   46 #include <sys/resourcevar.h>
   47 
   48 enum requeststatus bre(struct request *rq,
   49     int plexno,
   50     daddr_t * diskstart,
   51     daddr_t diskend);
   52 enum requeststatus bre5(struct request *rq,
   53     int plexno,
   54     daddr_t * diskstart,
   55     daddr_t diskend);
   56 enum requeststatus build_read_request(struct request *rq, int volplexno);
   57 enum requeststatus build_write_request(struct request *rq);
   58 enum requeststatus build_rq_buffer(struct rqelement *rqe, struct plex *plex);
   59 int find_alternate_sd(struct request *rq);
   60 int check_range_covered(struct request *);
   61 void complete_rqe(struct buf *bp);
   62 void complete_raid5_write(struct rqelement *);
   63 int abortrequest(struct request *rq, int error);
   64 void sdio_done(struct buf *bp);
   65 int vinum_bounds_check(struct buf *bp, struct volume *vol);
   66 caddr_t allocdatabuf(struct rqelement *rqe);
   67 void freedatabuf(struct rqelement *rqe);
   68 
   69 #ifdef VINUMDEBUG
   70 struct rqinfo rqinfo[RQINFO_SIZE];
   71 struct rqinfo *rqip = rqinfo;
   72 
   73 void
   74 logrq(enum rqinfo_type type, union rqinfou info, struct buf *ubp)
   75 {
   76     int s = splhigh();
   77 
   78     microtime(&rqip->timestamp);                            /* when did this happen? */
   79     rqip->type = type;
   80     rqip->bp = ubp;                                         /* user buffer */
   81     switch (type) {
   82     case loginfo_user_bp:
   83     case loginfo_user_bpl:
   84     case loginfo_sdio:                                      /* subdisk I/O */
   85     case loginfo_sdiol:                                     /* subdisk I/O launch */
   86     case loginfo_sdiodone:                                  /* subdisk I/O complete */
   87         bcopy(info.bp, &rqip->info.b, sizeof(struct buf));
   88         rqip->devmajor = major(info.bp->b_dev);
   89         rqip->devminor = minor(info.bp->b_dev);
   90         break;
   91 
   92     case loginfo_iodone:
   93     case loginfo_rqe:
   94     case loginfo_raid5_data:
   95     case loginfo_raid5_parity:
   96         bcopy(info.rqe, &rqip->info.rqe, sizeof(struct rqelement));
   97         rqip->devmajor = major(info.rqe->b.b_dev);
   98         rqip->devminor = minor(info.rqe->b.b_dev);
   99         break;
  100 
  101     case loginfo_lockwait:
  102     case loginfo_lock:
  103     case loginfo_unlock:
  104         bcopy(info.lockinfo, &rqip->info.lockinfo, sizeof(struct rangelock));
  105 
  106         break;
  107 
  108     case loginfo_unused:
  109         break;
  110     }
  111     rqip++;
  112     if (rqip >= &rqinfo[RQINFO_SIZE])                       /* wrap around */
  113         rqip = rqinfo;
  114     splx(s);
  115 }
  116 
  117 #endif
  118 
  119 void
  120 vinumstrategy(struct bio *biop)
  121 {
  122     struct buf *bp = (struct buf *) biop;
  123     int volno;
  124     struct volume *vol = NULL;
  125 
  126     switch (DEVTYPE(bp->b_dev)) {
  127     case VINUM_SD_TYPE:
  128     case VINUM_RAWSD_TYPE:
  129         sdio(bp);
  130         return;
  131 
  132         /*
  133          * In fact, vinum doesn't handle drives: they're
  134          * handled directly by the disk drivers
  135          */
  136     case VINUM_DRIVE_TYPE:
  137     default:
  138         bp->b_error = EIO;                                  /* I/O error */
  139         bp->b_io.bio_flags |= BIO_ERROR;
  140         bufdone(bp);
  141         return;
  142 
  143     case VINUM_VOLUME_TYPE:                                 /* volume I/O */
  144         volno = Volno(bp->b_dev);
  145         vol = &VOL[volno];
  146         if (vol->state != volume_up) {                      /* can't access this volume */
  147             bp->b_error = EIO;                              /* I/O error */
  148             bp->b_io.bio_flags |= BIO_ERROR;
  149             bufdone(bp);
  150             return;
  151         }
  152         if (vinum_bounds_check(bp, vol) <= 0) {             /* don't like them bounds */
  153             bufdone(bp);
  154             return;
  155         }
  156         /* FALLTHROUGH */
  157         /*
  158          * Plex I/O is pretty much the same as volume I/O
  159          * for a single plex.  Indicate this by passing a NULL
  160          * pointer (set above) for the volume
  161          */
  162     case VINUM_PLEX_TYPE:
  163     case VINUM_RAWPLEX_TYPE:
  164         bp->b_resid = bp->b_bcount;                         /* transfer everything */
  165         vinumstart(bp, 0);
  166         return;
  167     }
  168 }
  169 
  170 /*
  171  * Start a transfer.  Return -1 on error, 0 if OK,
  172  * 1 if we need to retry.  Parameter reviveok is
  173  * set when doing transfers for revives: it allows
  174  * transfers to be started immediately when a
  175  * revive is in progress.  During revive, normal
  176  * transfers are queued if they share address
  177  * space with a currently active revive operation.
  178  */
  179 int
  180 vinumstart(struct buf *bp, int reviveok)
  181 {
  182     int plexno;
  183     int maxplex;                                            /* maximum number of plexes to handle */
  184     struct volume *vol;
  185     struct request *rq;                                     /* build up our request here */
  186     enum requeststatus status;
  187 
  188 #ifdef VINUMDEBUG
  189     if (debug & DEBUG_LASTREQS)
  190         logrq(loginfo_user_bp, (union rqinfou) bp, bp);
  191 #endif
  192 
  193     if ((bp->b_bcount % DEV_BSIZE) != 0) {                  /* bad length */
  194         bp->b_error = EINVAL;                               /* invalid size */
  195         bp->b_io.bio_flags |= BIO_ERROR;
  196         bufdone(bp);
  197         return -1;
  198     }
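      /*
       * Editorial note (not in the original source): with the usual
       * DEV_BSIZE of 512 bytes, an 8192-byte request passes this check
       * (8192 % 512 == 0), while a 1000-byte request is rejected with
       * EINVAL.  All addresses from here on are in DEV_BSIZE sectors,
       * obtained as bp->b_bcount / DEV_BSIZE.
       */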
  199     rq = (struct request *) Malloc(sizeof(struct request)); /* allocate a request struct */
  200     if (rq == NULL) {                                       /* can't do it */
  201         bp->b_error = ENOMEM;                               /* can't get memory */
  202         bp->b_io.bio_flags |= BIO_ERROR;
  203         bufdone(bp);
  204         return -1;
  205     }
  206     bzero(rq, sizeof(struct request));
  207 
   208     /*
   209      * Note the user buffer.  The volume pointer (set
   210      * below) can be NULL, which the request building
   211      * functions use as an indication of single plex I/O.
   212      */
  213     rq->bp = bp;                                            /* and the user buffer struct */
  214 
  215     if (DEVTYPE(bp->b_dev) == VINUM_VOLUME_TYPE) {          /* it's a volume, */
  216         rq->volplex.volno = Volno(bp->b_dev);               /* get the volume number */
  217         vol = &VOL[rq->volplex.volno];                      /* and point to it */
  218         vol->active++;                                      /* one more active request */
  219         maxplex = vol->plexes;                              /* consider all its plexes */
  220     } else {
  221         vol = NULL;                                         /* no volume */
  222         rq->volplex.plexno = Plexno(bp->b_dev);             /* point to the plex */
  223         rq->isplex = 1;                                     /* note that it's a plex */
  224         maxplex = 1;                                        /* just the one plex */
  225     }
  226 
  227     if (bp->b_iocmd == BIO_READ) {
  228         /*
  229          * This is a read request.  Decide
  230          * which plex to read from.
  231          *
  232          * There's a potential race condition here,
  233          * since we're not locked, and we could end
  234          * up multiply incrementing the round-robin
  235          * counter.  This doesn't have any serious
  236          * effects, however.
  237          */
  238         if (vol != NULL) {
  239             plexno = vol->preferred_plex;                   /* get the plex to use */
  240             if (plexno < 0) {                               /* round robin */
  241                 plexno = vol->last_plex_read;
  242                 vol->last_plex_read++;
  243                 if (vol->last_plex_read >= vol->plexes)     /* got to the end? */
  244                     vol->last_plex_read = 0;                /* wrap around */
  245             }
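                  /*
                   * Editorial sketch (not in the original source): with
                   * vol->plexes == 3 and last_plex_read == 2, this read
                   * uses plex 2 and last_plex_read wraps to 0, so
                   * successive reads cycle through plexes 0, 1, 2, 0, ...
                   */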
  246             status = build_read_request(rq, plexno);        /* build a request */
  247         } else {
  248             daddr_t diskaddr = bp->b_blkno;                 /* start offset of transfer */
  249             status = bre(rq,                                /* build a request list */
  250                 rq->volplex.plexno,
  251                 &diskaddr,
  252                 diskaddr + (bp->b_bcount / DEV_BSIZE));
  253         }
  254 
  255         if (status > REQUEST_RECOVERED) {                   /* can't satisfy it */
  256             if (status == REQUEST_DOWN) {                   /* not enough subdisks */
  257                 bp->b_error = EIO;                          /* I/O error */
  258                 bp->b_io.bio_flags |= BIO_ERROR;
  259             }
  260             bufdone(bp);
  261             freerq(rq);
  262             return -1;
  263         }
  264         return launch_requests(rq, reviveok);               /* now start the requests if we can */
  265     } else
  266         /*
  267          * This is a write operation.  We write to all plexes.  If this is
  268          * a RAID-4 or RAID-5 plex, we must also update the parity stripe.
  269          */
  270     {
  271         if (vol != NULL) {
  272             if ((vol->plexes > 1)                           /* multiple plexes */
  273             ||(isparity((&PLEX[vol->plex[0]])))) {          /* or RAID-[45], */
  274                 rq->save_data = bp->b_data;                 /* save the data buffer address */
  275                 bp->b_data = Malloc(bp->b_bufsize);
  276                 bcopy(rq->save_data, bp->b_data, bp->b_bufsize); /* make a copy */
  277                 rq->flags |= XFR_COPYBUF;                   /* and note that we did it */
  278             }
  279             status = build_write_request(rq);
  280         } else {                                            /* plex I/O */
  281             daddr_t diskstart;
  282 
  283             diskstart = bp->b_blkno;                        /* start offset of transfer */
  284             status = bre(rq,
  285                 Plexno(bp->b_dev),
  286                 &diskstart,
  287                 bp->b_blkno + (bp->b_bcount / DEV_BSIZE));  /* build requests for the plex */
  288         }
  289         if (status > REQUEST_RECOVERED) {                   /* can't satisfy it */
  290             if (status == REQUEST_DOWN) {                   /* not enough subdisks */
  291                 bp->b_error = EIO;                          /* I/O error */
  292                 bp->b_io.bio_flags |= BIO_ERROR;
  293             }
  294             if (rq->flags & XFR_COPYBUF) {
  295                 Free(bp->b_data);
  296                 bp->b_data = rq->save_data;
  297             }
  298             bufdone(bp);
  299             freerq(rq);
  300             return -1;
  301         }
  302         return launch_requests(rq, reviveok);               /* now start the requests if we can */
  303     }
  304 }
  305 
  306 /*
  307  * Call the low-level strategy routines to
  308  * perform the requests in a struct request
  309  */
  310 int
  311 launch_requests(struct request *rq, int reviveok)
  312 {
  313     struct rqgroup *rqg;
  314     int rqno;                                               /* loop index */
  315     struct rqelement *rqe;                                  /* current element */
  316     struct drive *drive;
  317     int rcount;                                             /* request count */
  318 
  319     /*
   320      * First find out whether we're reviving and whether
   321      * the request contains a conflict.  If so, we hang
  322      * the request off plex->waitlist of the first
  323      * plex we find which is reviving
  324      */
  325 
  326     if ((rq->flags & XFR_REVIVECONFLICT)                    /* possible revive conflict */
  327     &&(!reviveok)) {                                        /* and we don't want to do it now, */
  328         struct sd *sd;
  329         struct request *waitlist;                           /* point to the waitlist */
  330 
  331         sd = &SD[rq->sdno];
  332         if (sd->waitlist != NULL) {                         /* something there already, */
  333             waitlist = sd->waitlist;
  334             while (waitlist->next != NULL)                  /* find the end */
  335                 waitlist = waitlist->next;
  336             waitlist->next = rq;                            /* hook our request there */
  337         } else
  338             sd->waitlist = rq;                              /* hook our request at the front */
  339 
  340 #ifdef VINUMDEBUG
  341         if (debug & DEBUG_REVIVECONFLICT)
  342             log(LOG_DEBUG,
  343                 "Revive conflict sd %d: %p\n%s dev %d.%d, offset 0x%llx, length %ld\n",
  344                 rq->sdno,
  345                 rq,
  346                 rq->bp->b_iocmd == BIO_READ ? "Read" : "Write",
  347                 major(rq->bp->b_dev),
  348                 minor(rq->bp->b_dev),
  349                 (long long)rq->bp->b_blkno,
  350                 rq->bp->b_bcount);
  351 #endif
  352         return 0;                                           /* and get out of here */
  353     }
  354     rq->active = 0;                                         /* nothing yet */
  355 #ifdef VINUMDEBUG
  356     if (debug & DEBUG_ADDRESSES)
  357         log(LOG_DEBUG,
  358             "Request: %p\n%s dev %d.%d, offset 0x%llx, length %ld\n",
  359             rq,
  360             rq->bp->b_iocmd == BIO_READ ? "Read" : "Write",
  361             major(rq->bp->b_dev),
  362             minor(rq->bp->b_dev),
  363             (long long)rq->bp->b_blkno,
  364             rq->bp->b_bcount);
  365     vinum_conf.lastrq = rq;
  366     vinum_conf.lastbuf = rq->bp;
  367     if (debug & DEBUG_LASTREQS)
  368         logrq(loginfo_user_bpl, (union rqinfou) rq->bp, rq->bp);
  369 #endif
  370 
  371     /*
  372      * We used to have an splbio() here anyway, out
  373      * of superstition.  With the division of labour
  374      * below (first count the requests, then issue
  375      * them), it looks as if we don't need this
  376      * splbio() protection.  In fact, as dillon
  377      * points out, there's a race condition
  378      * incrementing and decrementing rq->active and
  379      * rqg->active.  This splbio() didn't help
  380      * there, because the device strategy routine
  381      * can sleep.  Solve this by putting shorter
  382      * duration locks on the code.
  383      */
  384     /*
  385      * This loop happens without any participation
  386      * of the bottom half, so it requires no
  387      * protection.
  388      */
  389     for (rqg = rq->rqg; rqg != NULL; rqg = rqg->next) {     /* through the whole request chain */
  390         rqg->active = rqg->count;                           /* they're all active */
  391         for (rqno = 0; rqno < rqg->count; rqno++) {
  392             rqe = &rqg->rqe[rqno];
  393             if (rqe->flags & XFR_BAD_SUBDISK)               /* this subdisk is bad, */
  394                 rqg->active--;                              /* one less active request */
  395         }
  396         if (rqg->active)                                    /* we have at least one active request, */
  397             rq->active++;                                   /* one more active request group */
  398     }
  399 
  400     /*
  401      * Now fire off the requests.  In this loop the
  402      * bottom half could be completing requests
  403      * before we finish, so we need splbio() protection.
  404      */
  405     for (rqg = rq->rqg; rqg != NULL;) {                     /* through the whole request chain */
  406         if (rqg->lockbase >= 0)                             /* this rqg needs a lock first */
  407             rqg->lock = lockrange(rqg->lockbase, rqg->rq->bp, &PLEX[rqg->plexno]);
  408         rcount = rqg->count;
  409         for (rqno = 0; rqno < rcount;) {
  410             rqe = &rqg->rqe[rqno];
  411 
  412             /*
  413              * Point to next rqg before the bottom end
  414              * changes the structures.
  415              */
  416             if (++rqno >= rcount)
  417                 rqg = rqg->next;
  418             if ((rqe->flags & XFR_BAD_SUBDISK) == 0) {      /* this subdisk is good, */
  419                 drive = &DRIVE[rqe->driveno];               /* look at drive */
  420                 drive->active++;
  421                 if (drive->active >= drive->maxactive)
  422                     drive->maxactive = drive->active;
  423                 vinum_conf.active++;
  424                 if (vinum_conf.active >= vinum_conf.maxactive)
  425                     vinum_conf.maxactive = vinum_conf.active;
  426 
  427 #ifdef VINUMDEBUG
  428                 if (debug & DEBUG_ADDRESSES)
  429                     log(LOG_DEBUG,
  430                         "  %s dev %d.%d, sd %d, offset 0x%x, devoffset 0x%llx, length %ld\n",
  431                         rqe->b.b_iocmd == BIO_READ ? "Read" : "Write",
  432                         major(rqe->b.b_dev),
  433                         minor(rqe->b.b_dev),
  434                         rqe->sdno,
  435                         (u_int) (rqe->b.b_blkno - SD[rqe->sdno].driveoffset),
  436                         (long long)rqe->b.b_blkno,
  437                         rqe->b.b_bcount);
  438                 if (debug & DEBUG_LASTREQS)
  439                     logrq(loginfo_rqe, (union rqinfou) rqe, rq->bp);
  440 #endif
  441                 /* fire off the request */
  442                 DEV_STRATEGY(&rqe->b, 0);
  443             }
  444         }
  445     }
  446     return 0;
  447 }
  448 
  449 /*
  450  * define the low-level requests needed to perform a
  451  * high-level I/O operation for a specific plex 'plexno'.
  452  *
  453  * Return REQUEST_OK if all subdisks involved in the request are up,
  454  * REQUEST_DOWN if some subdisks are not up, and REQUEST_EOF if the
  455  * request is at least partially outside the bounds of the subdisks.
  456  *
  457  * Modify the pointer *diskstart to point to the end address.  On
  458  * read, return on the first bad subdisk, so that the caller
  459  * (build_read_request) can try alternatives.
  460  *
  461  * On entry to this routine, the rqg structures are not assigned.  The
  462  * assignment is performed by expandrq().  Strictly speaking, the
  463  * elements rqe->sdno of all entries should be set to -1, since 0
  464  * (from bzero) is a valid subdisk number.  We avoid this problem by
  465  * initializing the ones we use, and not looking at the others (index
  466  * >= rqg->requests).
  467  */
  468 enum requeststatus
  469 bre(struct request *rq,
  470     int plexno,
  471     daddr_t * diskaddr,
  472     daddr_t diskend)
  473 {
  474     int sdno;
  475     struct sd *sd;
  476     struct rqgroup *rqg;
  477     struct buf *bp;                                         /* user's bp */
  478     struct plex *plex;
  479     enum requeststatus status;                              /* return value */
  480     daddr_t plexoffset;                                     /* offset of transfer in plex */
  481     daddr_t stripebase;                                     /* base address of stripe (1st subdisk) */
  482     daddr_t stripeoffset;                                   /* offset in stripe */
  483     daddr_t blockoffset;                                    /* offset in stripe on subdisk */
  484     struct rqelement *rqe;                                  /* point to this request information */
  485     daddr_t diskstart = *diskaddr;                          /* remember where this transfer starts */
  486     enum requeststatus s;                                   /* temp return value */
  487 
  488     bp = rq->bp;                                            /* buffer pointer */
  489     status = REQUEST_OK;                                    /* return value: OK until proven otherwise */
  490     plex = &PLEX[plexno];                                   /* point to the plex */
  491 
  492     switch (plex->organization) {
  493     case plex_concat:
  494         sd = NULL;                                          /* (keep compiler quiet) */
  495         for (sdno = 0; sdno < plex->subdisks; sdno++) {
  496             sd = &SD[plex->sdnos[sdno]];
  497             if (*diskaddr < sd->plexoffset)                 /* we must have a hole, */
  498                 status = REQUEST_DEGRADED;                  /* note the fact */
  499             if (*diskaddr < (sd->plexoffset + sd->sectors)) { /* the request starts in this subdisk */
  500                 rqg = allocrqg(rq, 1);                      /* space for the request */
  501                 if (rqg == NULL) {                          /* malloc failed */
  502                     bp->b_error = ENOMEM;
  503                     bp->b_io.bio_flags |= BIO_ERROR;
  504                     return REQUEST_ENOMEM;
  505                 }
  506                 rqg->plexno = plexno;
  507 
  508                 rqe = &rqg->rqe[0];                         /* point to the element */
  509                 rqe->rqg = rqg;                             /* group */
  510                 rqe->sdno = sd->sdno;                       /* put in the subdisk number */
  511                 plexoffset = *diskaddr;                     /* start offset in plex */
  512                 rqe->sdoffset = plexoffset - sd->plexoffset; /* start offset in subdisk */
  513                 rqe->useroffset = plexoffset - diskstart;   /* start offset in user buffer */
  514                 rqe->dataoffset = 0;
  515                 rqe->datalen = min(diskend - *diskaddr,     /* number of sectors to transfer in this sd */
  516                     sd->sectors - rqe->sdoffset);
  517                 rqe->groupoffset = 0;                       /* no groups for concatenated plexes */
  518                 rqe->grouplen = 0;
  519                 rqe->buflen = rqe->datalen;                 /* buffer length is data buffer length */
  520                 rqe->flags = 0;
  521                 rqe->driveno = sd->driveno;
  522                 if (sd->state != sd_up) {                   /* *now* we find the sd is down */
  523                     s = checksdstate(sd, rq, *diskaddr, diskend); /* do we need to change state? */
  524                     if (s == REQUEST_DOWN) {                /* down? */
  525                         rqe->flags = XFR_BAD_SUBDISK;       /* yup */
  526                         if (rq->bp->b_iocmd == BIO_READ)    /* read request, */
  527                             return REQUEST_DEGRADED;        /* give up here */
  528                         /*
  529                          * If we're writing, don't give up
  530                          * because of a bad subdisk.  Go
  531                          * through to the bitter end, but note
  532                          * which ones we can't access.
  533                          */
  534                         status = REQUEST_DEGRADED;          /* can't do it all */
  535                     }
  536                 }
  537                 *diskaddr += rqe->datalen;                  /* bump the address */
  538                 if (build_rq_buffer(rqe, plex)) {           /* build the buffer */
  539                     deallocrqg(rqg);
  540                     bp->b_error = ENOMEM;
  541                     bp->b_io.bio_flags |= BIO_ERROR;
  542                     return REQUEST_ENOMEM;                  /* can't do it */
  543                 }
  544             }
  545             if (*diskaddr == diskend)                       /* we're finished, */
  546                 break;                                      /* get out of here */
  547         }
  548         /*
  549          * We've got to the end of the plex.  Have we got to the end of
  550          * the transfer?  It would seem that having an offset beyond the
  551          * end of the subdisk is an error, but in fact it can happen if
  552          * the volume has another plex of different size.  There's a valid
  553          * question as to why you would want to do this, but currently
  554          * it's allowed.
  555          *
  556          * In a previous version, I returned REQUEST_DOWN here.  I think
  557          * REQUEST_EOF is more appropriate now.
  558          */
  559         if (diskend > sd->sectors + sd->plexoffset)         /* pointing beyond EOF? */
  560             status = REQUEST_EOF;
  561         break;
  562 
  563     case plex_striped:
  564         {
  565             while (*diskaddr < diskend) {                   /* until we get it all sorted out */
  566                 if (*diskaddr >= plex->length)              /* beyond the end of the plex */
  567                     return REQUEST_EOF;                     /* can't continue */
  568 
  569                 /* The offset of the start address from the start of the stripe. */
  570                 stripeoffset = *diskaddr % (plex->stripesize * plex->subdisks);
  571 
  572                 /* The plex-relative address of the start of the stripe. */
  573                 stripebase = *diskaddr - stripeoffset;
  574 
  575                 /* The number of the subdisk in which the start is located. */
  576                 sdno = stripeoffset / plex->stripesize;
  577 
  578                 /* The offset from the beginning of the stripe on this subdisk. */
  579                 blockoffset = stripeoffset % plex->stripesize;
  580 
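                      /*
                       * Editorial worked example (numbers are illustrative,
                       * not in the original source): with stripesize = 128
                       * sectors, subdisks = 4 and *diskaddr = 1000:
                       *
                       *     stripeoffset = 1000 % (128 * 4) = 488
                       *     stripebase   = 1000 - 488       = 512
                       *     sdno         = 488 / 128        = 3
                       *     blockoffset  = 488 % 128        = 104
                       *
                       * and the subdisk start computed below becomes
                       * sdoffset = 512 / 4 + 104 = 232 sectors into
                       * subdisk 3.
                       */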
  581                 sd = &SD[plex->sdnos[sdno]];                /* the subdisk in question */
  582                 rqg = allocrqg(rq, 1);                      /* space for the request */
  583                 if (rqg == NULL) {                          /* malloc failed */
  584                     bp->b_error = ENOMEM;
  585                     bp->b_io.bio_flags |= BIO_ERROR;
  586                     return REQUEST_ENOMEM;
  587                 }
  588                 rqg->plexno = plexno;
  589 
  590                 rqe = &rqg->rqe[0];                         /* point to the element */
  591                 rqe->rqg = rqg;
  592                 rqe->sdoffset = stripebase / plex->subdisks + blockoffset; /* start offset in this subdisk */
  593                 rqe->useroffset = *diskaddr - diskstart;    /* The offset of the start in the user buffer */
  594                 rqe->dataoffset = 0;
  595                 rqe->datalen = min(diskend - *diskaddr,     /* the amount remaining to transfer */
  596                     plex->stripesize - blockoffset);        /* and the amount left in this stripe */
  597                 rqe->groupoffset = 0;                       /* no groups for striped plexes */
  598                 rqe->grouplen = 0;
  599                 rqe->buflen = rqe->datalen;                 /* buffer length is data buffer length */
  600                 rqe->flags = 0;
  601                 rqe->sdno = sd->sdno;                       /* put in the subdisk number */
  602                 rqe->driveno = sd->driveno;
  603 
  604                 if (sd->state != sd_up) {                   /* *now* we find the sd is down */
  605                     s = checksdstate(sd, rq, *diskaddr, diskend); /* do we need to change state? */
  606                     if (s == REQUEST_DOWN) {                /* down? */
  607                         rqe->flags = XFR_BAD_SUBDISK;       /* yup */
  608                         if (rq->bp->b_iocmd == BIO_READ)    /* read request, */
  609                             return REQUEST_DEGRADED;        /* give up here */
  610                         /*
  611                          * If we're writing, don't give up
  612                          * because of a bad subdisk.  Go through
  613                          * to the bitter end, but note which
  614                          * ones we can't access.
  615                          */
  616                         status = REQUEST_DEGRADED;          /* can't do it all */
  617                     }
  618                 }
  619                 /*
  620                  * It would seem that having an offset
  621                  * beyond the end of the subdisk is an
  622                  * error, but in fact it can happen if the
  623                  * volume has another plex of different
  624                  * size.  There's a valid question as to why
  625                  * you would want to do this, but currently
  626                  * it's allowed.
  627                  */
  628                 if (rqe->sdoffset + rqe->datalen > sd->sectors) { /* ends beyond the end of the subdisk? */
  629                     rqe->datalen = sd->sectors - rqe->sdoffset; /* truncate */
  630 #ifdef VINUMDEBUG
  631                     if (debug & DEBUG_EOFINFO) {            /* tell on the request */
  632                         log(LOG_DEBUG,
  633                             "vinum: EOF on plex %s, sd %s offset %x (user offset %llx)\n",
  634                             plex->name,
  635                             sd->name,
  636                             (u_int) sd->sectors,
  637                             (long long)bp->b_blkno);
  638                         log(LOG_DEBUG,
  639                             "vinum: stripebase %#llx, stripeoffset %#llx, blockoffset %#llx\n",
  640                             (unsigned long long)stripebase,
  641                             (unsigned long long)stripeoffset,
  642                             (unsigned long long)blockoffset);
  643                     }
  644 #endif
  645                 }
  646                 if (build_rq_buffer(rqe, plex)) {           /* build the buffer */
  647                     deallocrqg(rqg);
  648                     bp->b_error = ENOMEM;
  649                     bp->b_io.bio_flags |= BIO_ERROR;
  650                     return REQUEST_ENOMEM;                  /* can't do it */
  651                 }
  652                 *diskaddr += rqe->datalen;                  /* look at the remainder */
  653                 if ((*diskaddr < diskend)                   /* didn't finish the request on this stripe */
  654                 &&(*diskaddr < plex->length)) {             /* and there's more to come */
  655                     plex->multiblock++;                     /* count another one */
  656                     if (sdno == plex->subdisks - 1)         /* last subdisk, */
  657                         plex->multistripe++;                /* another stripe as well */
  658                 }
  659             }
  660         }
  661         break;
  662 
  663         /*
  664          * RAID-4 and RAID-5 are complicated enough to have their own
  665          * function.
  666          */
  667     case plex_raid4:
  668     case plex_raid5:
  669         status = bre5(rq, plexno, diskaddr, diskend);
  670         break;
  671 
  672     default:
  673         log(LOG_ERR, "vinum: invalid plex type %d in bre\n", plex->organization);
  674         status = REQUEST_DOWN;                              /* can't access it */
  675     }
  676 
  677     return status;
  678 }
  679 
  680 /*
  681  * Build up a request structure for reading volumes.
  682  * This function is not needed for plex reads, since there's
  683  * no recovery if a plex read can't be satisfied.
  684  */
  685 enum requeststatus
  686 build_read_request(struct request *rq,                      /* request */
  687     int plexindex)
  688 {                                                           /* index in the volume's plex table */
  689     struct buf *bp;
  690     daddr_t startaddr;                                      /* offset of previous part of transfer */
  691     daddr_t diskaddr;                                       /* offset of current part of transfer */
  692     daddr_t diskend;                                        /* and end offset of transfer */
  693     int plexno;                                             /* plex index in vinum_conf */
  694     struct rqgroup *rqg;                                    /* point to the request we're working on */
  695     struct volume *vol;                                     /* volume in question */
  696     int recovered = 0;                                      /* set if we recover a read */
  697     enum requeststatus status = REQUEST_OK;
  698     int plexmask;                                           /* bit mask of plexes, for recovery */
  699 
  700     bp = rq->bp;                                            /* buffer pointer */
  701     diskaddr = bp->b_blkno;                                 /* start offset of transfer */
  702     diskend = diskaddr + (bp->b_bcount / DEV_BSIZE);        /* and end offset of transfer */
  703     rqg = &rq->rqg[plexindex];                              /* plex request */
  704     vol = &VOL[rq->volplex.volno];                          /* point to volume */
  705 
  706     while (diskaddr < diskend) {                            /* build up request components */
  707         startaddr = diskaddr;
  708         status = bre(rq, vol->plex[plexindex], &diskaddr, diskend); /* build up a request */
  709         switch (status) {
  710         case REQUEST_OK:
  711             continue;
  712 
  713         case REQUEST_RECOVERED:
  714             /*
  715              * XXX FIXME if we have more than one plex, and we can
  716              * satisfy the request from another, don't use the
  717              * recovered request, since it's more expensive.
  718              */
  719             recovered = 1;
  720             break;
  721 
  722         case REQUEST_ENOMEM:
  723             return status;
  724             /*
  725              * If we get here, our request is not complete.  Try
  726              * to fill in the missing parts from another plex.
  727              * This can happen multiple times in this function,
  728              * and we reinitialize the plex mask each time, since
  729              * we could have a hole in our plexes.
  730              */
  731         case REQUEST_EOF:
  732         case REQUEST_DOWN:                                  /* can't access the plex */
  733         case REQUEST_DEGRADED:                              /* can't access the plex */
  734             plexmask = ((1 << vol->plexes) - 1)             /* all plexes in the volume */
  735             &~(1 << plexindex);                             /* except for the one we were looking at */
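              /*
               * Editorial sketch (not in the original source): with
               * vol->plexes == 3 and plexindex == 1,
               * plexmask = 0b111 & ~0b010 = 0b101, so only plexes
               * 0 and 2 are tried in the loop below.
               */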
  736             for (plexno = 0; plexno < vol->plexes; plexno++) {
  737                 if (plexmask == 0)                          /* no plexes left to try */
  738                     return REQUEST_DOWN;                    /* failed */
  739                 diskaddr = startaddr;                       /* start at the beginning again */
  740                 if (plexmask & (1 << plexno)) {             /* we haven't tried this plex yet */
  741                     bre(rq, vol->plex[plexno], &diskaddr, diskend); /* try a request */
  742                     if (diskaddr > startaddr) {             /* we satisfied another part */
  743                         recovered = 1;                      /* we recovered from the problem */
  744                         status = REQUEST_OK;                /* don't complain about it */
  745                         break;
  746                     }
  747                 }
  748             }
  749             if (diskaddr == startaddr)                      /* didn't get any further, */
  750                 return status;
  751         }
  752         if (recovered)
  753             vol->recovered_reads += recovered;              /* adjust our recovery count */
  754     }
  755     return status;
  756 }
  757 
  758 /*
  759  * Build up a request structure for writes.
  760  * Return REQUEST_OK if all subdisks involved in the request are up,
  761  * REQUEST_DOWN if some subdisks are not up, and REQUEST_EOF if the
  762  * request is at least partially outside the bounds of the subdisks.
  763  */
  764 enum requeststatus
  765 build_write_request(struct request *rq)
  766 {                                                           /* request */
  767     struct buf *bp;
  768     daddr_t diskstart;                                      /* offset of current part of transfer */
  769     daddr_t diskend;                                        /* and end offset of transfer */
  770     int plexno;                                             /* plex index in vinum_conf */
  771     struct volume *vol;                                     /* volume in question */
  772     enum requeststatus status;
  773 
  774     bp = rq->bp;                                            /* buffer pointer */
  775     vol = &VOL[rq->volplex.volno];                          /* point to volume */
  776     diskend = bp->b_blkno + (bp->b_bcount / DEV_BSIZE);     /* end offset of transfer */
  777     status = REQUEST_DOWN;                                  /* assume the worst */
  778     for (plexno = 0; plexno < vol->plexes; plexno++) {
  779         diskstart = bp->b_blkno;                            /* start offset of transfer */
  780         /*
  781          * Build requests for the plex.
  782          * We take the best possible result here (min,
  783          * not max): we're happy if we can write at all
  784          */
  785         status = min(status, bre(rq,
  786                 vol->plex[plexno],
  787                 &diskstart,
  788                 diskend));
  789     }
  790     return status;
  791 }
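/*
 * Editorial note (not in the original source): the min() above relies on
 * enum requeststatus being ordered from best to worst.  A sketch of the
 * assumed ordering, inferred from the "status > REQUEST_RECOVERED means
 * failure" tests elsewhere in this file (names from this file, exact
 * values assumed):
 *
 *     enum requeststatus {
 *         REQUEST_OK,            request built OK
 *         REQUEST_RECOVERED,     OK, but needed RAID-5 recovery
 *         REQUEST_DEGRADED,      parts of the request failed
 *         REQUEST_EOF,           parts fell outside the plex
 *         REQUEST_DOWN,          the whole request failed
 *         REQUEST_ENOMEM         out of memory
 *     };
 *
 * min() therefore keeps the most successful per-plex result: the write
 * is considered OK if any plex can take it in full.
 */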
  792 
  793 /* Fill in the struct buf part of a request element. */
  794 enum requeststatus
  795 build_rq_buffer(struct rqelement *rqe, struct plex *plex)
  796 {
  797     struct sd *sd;                                          /* point to subdisk */
  798     struct volume *vol;
  799     struct buf *bp;
  800     struct buf *ubp;                                        /* user (high level) buffer header */
  801 
  802     vol = &VOL[rqe->rqg->rq->volplex.volno];
  803     sd = &SD[rqe->sdno];                                    /* point to subdisk */
  804     bp = &rqe->b;
  805     ubp = rqe->rqg->rq->bp;                                 /* pointer to user buffer header */
  806 
  807     /* Initialize the buf struct */
  808     /* copy these flags from user bp */
  809     bp->b_flags = ubp->b_flags & (B_NOCACHE | B_ASYNC);
  810     bp->b_io.bio_flags = 0;
  811     bp->b_iocmd = ubp->b_iocmd;
  812 #ifdef VINUMDEBUG
  813     if (rqe->flags & XFR_BUFLOCKED)                         /* paranoia */
  814         panic("build_rq_buffer: rqe already locked");       /* XXX remove this when we're sure */
  815 #endif
  816     BUF_LOCKINIT(bp);                                       /* get a lock for the buffer */
  817     BUF_LOCK(bp, LK_EXCLUSIVE);                             /* and lock it */
  818     BUF_KERNPROC(bp);
  819     rqe->flags |= XFR_BUFLOCKED;
  820     bp->b_iodone = complete_rqe;
  821     /*
  822      * You'd think that we wouldn't need to even
  823      * build the request buffer for a dead subdisk,
  824      * but in some cases we need information like
  825      * the user buffer address.  Err on the side of
  826      * generosity and supply what we can.  That
  827      * obviously doesn't include drive information
  828      * when the drive is dead.
  829      */
  830     if ((rqe->flags & XFR_BAD_SUBDISK) == 0)                /* subdisk is accessible, */
  831         bp->b_dev = DRIVE[rqe->driveno].dev;                /* drive device */
  832     bp->b_blkno = rqe->sdoffset + sd->driveoffset;          /* start address */
  833     bp->b_bcount = rqe->buflen << DEV_BSHIFT;               /* number of bytes to transfer */
  834     bp->b_resid = bp->b_bcount;                             /* and it's still all waiting */
  835     bp->b_bufsize = bp->b_bcount;                           /* and buffer size */
  836     bp->b_rcred = FSCRED;                                   /* we have the filesystem credentials */
  837     bp->b_wcred = FSCRED;                                   /* we have the filesystem credentials */
  838 
  839     if (rqe->flags & XFR_MALLOCED) {                        /* this operation requires a malloced buffer */
  840         bp->b_data = Malloc(bp->b_bcount);                  /* get a buffer to put it in */
  841         if (bp->b_data == NULL) {                           /* failed */
  842             abortrequest(rqe->rqg->rq, ENOMEM);
  843             return REQUEST_ENOMEM;                          /* no memory */
  844         }
  845     } else
  846         /*
  847          * Point directly to user buffer data.  This means
  848          * that we don't need to do anything when we have
  849          * finished the transfer
  850          */
  851         bp->b_data = ubp->b_data + rqe->useroffset * DEV_BSIZE;
  852     /*
  853      * On a recovery read, we perform an XOR of
  854      * all blocks to the user buffer.  To make
  855      * this work, we first clean out the buffer
  856      */
  857     if ((rqe->flags & (XFR_RECOVERY_READ | XFR_BAD_SUBDISK))
  858         == (XFR_RECOVERY_READ | XFR_BAD_SUBDISK)) {         /* bad subdisk of a recovery read */
  859         int length = rqe->grouplen << DEV_BSHIFT;           /* and count involved */
  860         char *data = (char *) &rqe->b.b_data[rqe->groupoffset << DEV_BSHIFT]; /* destination */
  861 
  862         bzero(data, length);                                /* clean it out */
  863     }
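      /*
       * Editorial note (not in the original source): zeroing works
       * because 0 is the identity for XOR (x ^ 0 == x), so the
       * completion code can XOR each surviving block into this buffer
       * and the first block lands unchanged.
       */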
   864     return REQUEST_OK;
  865 }
  866 
  867 /*
  868  * Abort a request: free resources and complete the
  869  * user request with the specified error
  870  */
  871 int
  872 abortrequest(struct request *rq, int error)
  873 {
  874     struct buf *bp = rq->bp;                                /* user buffer */
  875 
  876     bp->b_error = error;
  877     freerq(rq);                                             /* free everything we're doing */
  878     bp->b_io.bio_flags |= BIO_ERROR;
  879     return error;                                           /* and give up */
  880 }
  881 
  882 /*
  883  * Check that our transfer will cover the
  884  * complete address space of the user request.
  885  *
  886  * Return 1 if it can, otherwise 0
  887  */
  888 int
  889 check_range_covered(struct request *rq)
  890 {
   891     return 1;                                               /* XXX no check performed; assume covered */
  892 }
  893 
  894 /* Perform I/O on a subdisk */
  895 void
  896 sdio(struct buf *bp)
  897 {
  898     int s;                                                  /* spl */
  899     struct sd *sd;
  900     struct sdbuf *sbp;
  901     daddr_t endoffset;
  902     struct drive *drive;
  903 
  904 #ifdef VINUMDEBUG
  905     if (debug & DEBUG_LASTREQS)
  906         logrq(loginfo_sdio, (union rqinfou) bp, bp);
  907 #endif
  908     sd = &SD[Sdno(bp->b_dev)];                              /* point to the subdisk */
  909     drive = &DRIVE[sd->driveno];
  910 
  911     if (drive->state != drive_up) {
  912         if (sd->state >= sd_crashed) {
  913             if (bp->b_iocmd == BIO_WRITE)                   /* writing, */
  914                 set_sd_state(sd->sdno, sd_stale, setstate_force);
  915             else
  916                 set_sd_state(sd->sdno, sd_crashed, setstate_force);
  917         }
  918         bp->b_error = EIO;
  919         bp->b_io.bio_flags |= BIO_ERROR;
  920         bufdone(bp);
  921         return;
  922     }
  923     /*
  924      * We allow access to any kind of subdisk as long as we can expect
  925      * to get the I/O performed.
  926      */
  927     if (sd->state < sd_empty) {                             /* nothing to talk to, */
  928         bp->b_error = EIO;
  929         bp->b_io.bio_flags |= BIO_ERROR;
  930         bufdone(bp);
  931         return;
  932     }
  933     /* Get a buffer */
  934     sbp = (struct sdbuf *) Malloc(sizeof(struct sdbuf));
  935     if (sbp == NULL) {
  936         bp->b_error = ENOMEM;
  937         bp->b_io.bio_flags |= BIO_ERROR;
  938         bufdone(bp);
  939         return;
  940     }
  941     bzero(sbp, sizeof(struct sdbuf));                       /* start with nothing */
  942     sbp->b.b_flags = bp->b_flags;
  943     sbp->b.b_iocmd = bp->b_iocmd;
  944     sbp->b.b_bufsize = bp->b_bufsize;                       /* buffer size */
  945     sbp->b.b_bcount = bp->b_bcount;                         /* number of bytes to transfer */
  946     sbp->b.b_resid = bp->b_resid;                           /* and amount waiting */
  947     sbp->b.b_dev = DRIVE[sd->driveno].dev;                  /* device */
  948     sbp->b.b_data = bp->b_data;                             /* data buffer */
  949     sbp->b.b_blkno = bp->b_blkno + sd->driveoffset;
  950     sbp->b.b_iodone = sdio_done;                            /* come here on completion */
  951     BUF_LOCKINIT(&sbp->b);                                  /* get a lock for the buffer */
  952     BUF_LOCK(&sbp->b, LK_EXCLUSIVE);                        /* and lock it */
  953     BUF_KERNPROC(&sbp->b);
  954     sbp->bp = bp;                                           /* note the address of the original header */
  955     sbp->sdno = sd->sdno;                                   /* note for statistics */
  956     sbp->driveno = sd->driveno;
  957     endoffset = bp->b_blkno + sbp->b.b_bcount / DEV_BSIZE;  /* final sector offset */
  958     if (endoffset > sd->sectors) {                          /* beyond the end */
  959         sbp->b.b_bcount -= (endoffset - sd->sectors) * DEV_BSIZE; /* trim */
  960         if (sbp->b.b_bcount <= 0) {                         /* nothing to transfer */
  961             bp->b_resid = bp->b_bcount;                     /* nothing transferred */
  962             bufdone(bp);
  963             BUF_UNLOCK(&sbp->b);
  964             BUF_LOCKFREE(&sbp->b);
  965             Free(sbp);
  966             return;
  967         }
  968     }
  969 #ifdef VINUMDEBUG
  970     if (debug & DEBUG_ADDRESSES)
  971         log(LOG_DEBUG,
  972             "  %s dev %d.%d, sd %d, offset 0x%x, devoffset 0x%x, length %ld\n",
  973             sbp->b.b_iocmd == BIO_READ ? "Read" : "Write",
  974             major(sbp->b.b_dev),
  975             minor(sbp->b.b_dev),
  976             sbp->sdno,
  977             (u_int) (sbp->b.b_blkno - SD[sbp->sdno].driveoffset),
  978             (int) sbp->b.b_blkno,
  979             sbp->b.b_bcount);
  980 #endif
  981     s = splbio();
  982 #ifdef VINUMDEBUG
  983     if (debug & DEBUG_LASTREQS)
  984         logrq(loginfo_sdiol, (union rqinfou) &sbp->b, &sbp->b);
  985 #endif
  986     DEV_STRATEGY(&sbp->b, 0);
  987     splx(s);
  988 }
  989 
  990 /*
  991  * Simplified version of bounds_check_with_label
  992  * Determine the size of the transfer, and make sure it is
  993  * within the boundaries of the partition. Adjust transfer
  994  * if needed, and signal errors or early completion.
  995  *
  996  * Volumes are simpler than disk slices: they only contain
  997  * one component (though we call them a, b and c to make
  998  * system utilities happy), and they always take up the
  999  * complete space of the "partition".
 1000  *
 1001  * I'm still not happy with this: why should the label be
 1002  * protected?  If it weren't so damned difficult to write
 1003  * one in the first place (because it's protected), it wouldn't
 1004  * be a problem.
 1005  */
 1006 int
 1007 vinum_bounds_check(struct buf *bp, struct volume *vol)
 1008 {
 1009     int maxsize = vol->size;                                /* size of the partition (sectors) */
 1010     int size = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT; /* size of this request (sectors) */
 1011 
 1012     /* Would this transfer overwrite the disk label? */
 1013     if (bp->b_blkno <= LABELSECTOR                          /* starts before or at the label */
 1014 #if LABELSECTOR != 0
 1015         && bp->b_blkno + size > LABELSECTOR                 /* and finishes after */
 1016 #endif
 1017         && (!(vol->flags & VF_RAW))                         /* and it's not raw */
 1018         &&(bp->b_iocmd == BIO_WRITE)                        /* and it's a write */
 1019         &&(!(vol->flags & (VF_WLABEL | VF_LABELLING)))) {   /* and we're not allowed to write the label */
 1020         bp->b_error = EROFS;                                /* read-only */
 1021         bp->b_io.bio_flags |= BIO_ERROR;
 1022         return -1;
 1023     }
 1024     if (size == 0)                                          /* no transfer specified, */
 1025         return 0;                                           /* treat as EOF */
 1026     /* beyond partition? */
 1027     if (bp->b_blkno < 0                                     /* negative start */
 1028         || bp->b_blkno + size > maxsize) {                  /* or goes beyond the end of the partition */
 1029         /* if exactly at end of disk, return an EOF */
 1030         if (bp->b_blkno == maxsize) {
 1031             bp->b_resid = bp->b_bcount;
 1032             return 0;
 1033         }
 1034         /* or truncate if part of it fits */
 1035         size = maxsize - bp->b_blkno;
 1036         if (size <= 0) {                                    /* nothing to transfer */
 1037             bp->b_error = EINVAL;
 1038             bp->b_io.bio_flags |= BIO_ERROR;
 1039             return -1;
 1040         }
 1041         bp->b_bcount = size << DEV_BSHIFT;
 1042     }
 1043     bp->b_pblkno = bp->b_blkno;
 1044     return 1;
 1045 }
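/*
 * Editorial worked example (numbers are illustrative, not in the
 * original source): for a volume with vol->size = 1000 sectors, a write
 * with b_blkno = 990 and b_bcount = 16384 bytes gives size = 32 sectors;
 * since 990 + 32 > 1000, the request is truncated to
 * size = 1000 - 990 = 10 sectors, b_bcount becomes
 * 10 << DEV_BSHIFT = 5120 bytes, and the function returns 1.
 */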
 1046 
 1047 /*
 1048  * Allocate a request group and hook
 1049  * it into the list for rq
 1050  */
 1051 struct rqgroup *
 1052 allocrqg(struct request *rq, int elements)
 1053 {
 1054     struct rqgroup *rqg;                                    /* the one we're going to allocate */
 1055     int size = sizeof(struct rqgroup) + elements * sizeof(struct rqelement);
 1056 
 1057     rqg = (struct rqgroup *) Malloc(size);
 1058     if (rqg != NULL) {                                      /* malloc OK, */
 1059         if (rq->rqg)                                        /* we already have requests */
 1060             rq->lrqg->next = rqg;                           /* hang it off the end */
 1061         else                                                /* first request */
 1062             rq->rqg = rqg;                                  /* at the start */
 1063         rq->lrqg = rqg;                                     /* this one is the last in the list */
 1064 
 1065         bzero(rqg, size);                                   /* no old junk */
 1066         rqg->rq = rq;                                       /* point back to the parent request */
 1067         rqg->count = elements;                              /* number of requests in the group */
 1068         rqg->lockbase = -1;                                 /* no lock required yet */
 1069     }
 1070     return rqg;
 1071 }
 1072 
 1073 /*
 1074  * Deallocate a request group out of a chain.  We do
 1075  * this by linear search: the chain is short, this
 1076  * almost never happens, and currently it can only
 1077  * happen to the first member of the chain.
 1078  */
 1079 void
 1080 deallocrqg(struct rqgroup *rqg)
 1081 {
 1082     struct rqgroup *rqgc = rqg->rq->rqg;                    /* point to the request chain */
 1083 
 1084     if (rqg->lock)                                          /* got a lock? */
 1085         unlockrange(rqg->plexno, rqg->lock);                /* yes, free it */
 1086     if (rqgc == rqg)                                        /* we're first in line */
 1087         rqg->rq->rqg = rqg->next;                           /* unhook ourselves */
 1088     else {
 1089         while ((rqgc->next != NULL)                         /* find the group */
 1090         &&(rqgc->next != rqg))
 1091             rqgc = rqgc->next;
 1092         if (rqgc->next == NULL)
 1093             log(LOG_ERR,
 1094                 "vinum deallocrqg: rqg %p not found in request %p\n",
 1095                 rqg,
 1096                 rqg->rq);
 1097         else
 1098             rqgc->next = rqg->next;                         /* make the chain jump over us */
 1099     }
 1100     Free(rqg);
 1101 }
 1102 
 1103 /* Local Variables: */
 1104 /* fill-column: 50 */
 1105 /* End: */
