/*-
 * Copyright (c) 1997, 1998, 1999
 *  Nan Yang Computer Services Limited.  All rights reserved.
 *
 *  Parts copyright (c) 1997, 1998 Cybernet Corporation, NetMAX project.
 *
 *  Written by Greg Lehey
 *
 *  This software is distributed under the so-called ``Berkeley
 *  License'':
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Nan Yang Computer
 *      Services Limited.
 * 4. Neither the name of the Company nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * This software is provided ``as is'', and any express or implied
 * warranties, including, but not limited to, the implied warranties of
 * merchantability and fitness for a particular purpose are disclaimed.
 * In no event shall the company or contributors be liable for any
 * direct, indirect, incidental, special, exemplary, or consequential
 * damages (including, but not limited to, procurement of substitute
 * goods or services; loss of use, data, or profits; or business
 * interruption) however caused and on any theory of liability, whether
 * in contract, strict liability, or tort (including negligence or
 * otherwise) arising in any way out of the use of this software, even if
 * advised of the possibility of such damage.
 *
 * $Id: vinumrequest.c,v 1.30 2001/01/09 04:20:55 grog Exp grog $
 * $FreeBSD: src/sys/dev/vinum/vinumrequest.c,v 1.44.2.5 2002/08/28 04:30:56 grog Exp $
 * $DragonFly: src/sys/dev/raid/vinum/vinumrequest.c,v 1.6 2005/08/03 16:36:33 hmp Exp $
 */

#include "vinumhdr.h"
#include "request.h"
#include <sys/resourcevar.h>

enum requeststatus bre(struct request *rq,
    int plexno,
    daddr_t * diskstart,
    daddr_t diskend);
enum requeststatus bre5(struct request *rq,
    int plexno,
    daddr_t * diskstart,
    daddr_t diskend);
enum requeststatus build_read_request(struct request *rq, int volplexno);
enum requeststatus build_write_request(struct request *rq);
enum requeststatus build_rq_buffer(struct rqelement *rqe, struct plex *plex);
int find_alternate_sd(struct request *rq);
int check_range_covered(struct request *);
void complete_rqe(struct buf *bp);
void complete_raid5_write(struct rqelement *);
int abortrequest(struct request *rq, int error);
void sdio_done(struct buf *bp);
int vinum_bounds_check(struct buf *bp, struct volume *vol);
caddr_t allocdatabuf(struct rqelement *rqe);
void freedatabuf(struct rqelement *rqe);

#ifdef VINUMDEBUG
struct rqinfo rqinfo[RQINFO_SIZE];
struct rqinfo *rqip = rqinfo;

void
logrq(enum rqinfo_type type, union rqinfou info, struct buf *ubp)
{
    crit_enter();

    microtime(&rqip->timestamp);                            /* when did this happen? */
    rqip->type = type;
    rqip->bp = ubp;                                         /* user buffer */
    switch (type) {
    case loginfo_user_bp:
    case loginfo_user_bpl:
    case loginfo_sdio:                                      /* subdisk I/O */
    case loginfo_sdiol:                                     /* subdisk I/O launch */
    case loginfo_sdiodone:                                  /* subdisk I/O complete */
        bcopy(info.bp, &rqip->info.b, sizeof(struct buf));
        rqip->devmajor = major(info.bp->b_dev);
        rqip->devminor = minor(info.bp->b_dev);
        break;

    case loginfo_iodone:
    case loginfo_rqe:
    case loginfo_raid5_data:
    case loginfo_raid5_parity:
        bcopy(info.rqe, &rqip->info.rqe, sizeof(struct rqelement));
        rqip->devmajor = major(info.rqe->b.b_dev);
        rqip->devminor = minor(info.rqe->b.b_dev);
        break;

    case loginfo_lockwait:
    case loginfo_lock:
    case loginfo_unlock:
        bcopy(info.lockinfo, &rqip->info.lockinfo, sizeof(struct rangelock));

        break;

    case loginfo_unused:
        break;
    }
    rqip++;
    if (rqip >= &rqinfo[RQINFO_SIZE])                       /* wrap around */
        rqip = rqinfo;
    crit_exit();
}

#endif

void
vinumstrategy(struct buf *bp)
{
    int volno;
    struct volume *vol = NULL;

    switch (DEVTYPE(bp->b_dev)) {
    case VINUM_SD_TYPE:
    case VINUM_RAWSD_TYPE:
        sdio(bp);
        return;

        /*
         * In fact, vinum doesn't handle drives: they're
         * handled directly by the disk drivers
         */
    case VINUM_DRIVE_TYPE:
    default:
        bp->b_error = EIO;                                  /* I/O error */
        bp->b_flags |= B_ERROR;
        biodone(bp);
        return;

    case VINUM_VOLUME_TYPE:                                 /* volume I/O */
        volno = Volno(bp->b_dev);
        vol = &VOL[volno];
        if (vol->state != volume_up) {                      /* can't access this volume */
            bp->b_error = EIO;                              /* I/O error */
            bp->b_flags |= B_ERROR;
            biodone(bp);
            return;
        }
        if (vinum_bounds_check(bp, vol) <= 0) {             /* don't like them bounds */
            biodone(bp);
            return;
        }
        /* FALLTHROUGH */
        /*
         * Plex I/O is pretty much the same as volume I/O
         * for a single plex.  Indicate this by passing a NULL
         * pointer (set above) for the volume
         */
    case VINUM_PLEX_TYPE:
    case VINUM_RAWPLEX_TYPE:
        bp->b_resid = bp->b_bcount;                         /* transfer everything */
        vinumstart(bp, 0);
        return;
    }
}

/*
 * Start a transfer.  Return -1 on error,
 * 0 if OK, 1 if we need to retry.
 * Parameter reviveok is set when doing
 * transfers for revives: it allows transfers to
 * be started immediately when a revive is in
 * progress.  During revive, normal transfers
 * are queued if they share address space with
 * a currently active revive operation.
 */
int
vinumstart(struct buf *bp, int reviveok)
{
    int plexno;
    int maxplex;                                            /* maximum number of plexes to handle */
    struct volume *vol;
    struct request *rq;                                     /* build up our request here */
    enum requeststatus status;

#if VINUMDEBUG
    if (debug & DEBUG_LASTREQS)
        logrq(loginfo_user_bp, (union rqinfou) bp, bp);
#endif

    if ((bp->b_bcount % DEV_BSIZE) != 0) {                  /* bad length */
        bp->b_error = EINVAL;                               /* invalid size */
        bp->b_flags |= B_ERROR;
        biodone(bp);
        return -1;
    }
    rq = (struct request *) Malloc(sizeof(struct request)); /* allocate a request struct */
    if (rq == NULL) {                                       /* can't do it */
        bp->b_error = ENOMEM;                               /* can't get memory */
        bp->b_flags |= B_ERROR;
        biodone(bp);
        return -1;
    }
    bzero(rq, sizeof(struct request));

    /*
     * Note the volume ID.  This can be NULL, which
     * the request building functions use as an
     * indication for single plex I/O
     */
    rq->bp = bp;                                            /* and the user buffer struct */

    if (DEVTYPE(bp->b_dev) == VINUM_VOLUME_TYPE) {          /* it's a volume, */
        rq->volplex.volno = Volno(bp->b_dev);               /* get the volume number */
        vol = &VOL[rq->volplex.volno];                      /* and point to it */
        vol->active++;                                      /* one more active request */
        maxplex = vol->plexes;                              /* consider all its plexes */
    } else {
        vol = NULL;                                         /* no volume */
        rq->volplex.plexno = Plexno(bp->b_dev);             /* point to the plex */
        rq->isplex = 1;                                     /* note that it's a plex */
        maxplex = 1;                                        /* just the one plex */
    }

    if (bp->b_flags & B_READ) {
        /*
         * This is a read request.  Decide
         * which plex to read from.
         *
         * There's a potential race condition here,
         * since we're not locked, and we could end
         * up multiply incrementing the round-robin
         * counter.  This doesn't have any serious
         * effects, however.
         */
        if (vol != NULL) {
            plexno = vol->preferred_plex;                   /* get the plex to use */
            if (plexno < 0) {                               /* round robin */
                plexno = vol->last_plex_read;
                vol->last_plex_read++;
                if (vol->last_plex_read >= vol->plexes)     /* got to the end? */
                    vol->last_plex_read = 0;                /* wrap around */
            }
            status = build_read_request(rq, plexno);        /* build a request */
        } else {
            daddr_t diskaddr = bp->b_blkno;                 /* start offset of transfer */
            status = bre(rq,                                /* build a request list */
                rq->volplex.plexno,
                &diskaddr,
                diskaddr + (bp->b_bcount / DEV_BSIZE));
        }

        if (status > REQUEST_RECOVERED) {                   /* can't satisfy it */
            if (status == REQUEST_DOWN) {                   /* not enough subdisks */
                bp->b_error = EIO;                          /* I/O error */
                bp->b_flags |= B_ERROR;
            }
            biodone(bp);
            freerq(rq);
            return -1;
        }
        return launch_requests(rq, reviveok);               /* now start the requests if we can */
    } else
        /*
         * This is a write operation.  We write to all plexes.  If this is
         * a RAID-4 or RAID-5 plex, we must also update the parity stripe.
         */
    {
        if (vol != NULL)
            status = build_write_request(rq);               /* build requests for all plexes, even if not all subdisks are up */
        else {                                              /* plex I/O */
            daddr_t diskstart;

            diskstart = bp->b_blkno;                        /* start offset of transfer */
            status = bre(rq,
                Plexno(bp->b_dev),
                &diskstart,
                bp->b_blkno + (bp->b_bcount / DEV_BSIZE));  /* build requests for the plex */
        }
        if (status > REQUEST_RECOVERED) {                   /* can't satisfy it */
            if (status == REQUEST_DOWN) {                   /* not enough subdisks */
                bp->b_error = EIO;                          /* I/O error */
                bp->b_flags |= B_ERROR;
            }
            biodone(bp);
            freerq(rq);
            return -1;
        }
        return launch_requests(rq, reviveok);               /* now start the requests if we can */
    }
}

/*
 * Call the low-level strategy routines to
 * perform the requests in a struct request
 */
int
launch_requests(struct request *rq, int reviveok)
{
    struct rqgroup *rqg;
    int rqno;                                               /* loop index */
    struct rqelement *rqe;                                  /* current element */
    struct drive *drive;
    int rcount;                                             /* request count */

    /*
     * First find out whether we're reviving, and the
     * request contains a conflict.  If so, we hang
     * the request off the waitlist of the subdisk
     * which is being revived.
     */

    if ((rq->flags & XFR_REVIVECONFLICT)                    /* possible revive conflict */
        && (!reviveok)) {                                   /* and we don't want to do it now, */
        struct sd *sd;
        struct request *waitlist;                           /* point to the waitlist */

        sd = &SD[rq->sdno];
        if (sd->waitlist != NULL) {                         /* something there already, */
            waitlist = sd->waitlist;
            while (waitlist->next != NULL)                  /* find the end */
                waitlist = waitlist->next;
            waitlist->next = rq;                            /* hook our request there */
        } else
            sd->waitlist = rq;                              /* hook our request at the front */

#if VINUMDEBUG
        if (debug & DEBUG_REVIVECONFLICT)
            log(LOG_DEBUG,
                "Revive conflict sd %d: %p\n%s dev %d.%d, offset 0x%x, length %ld\n",
                rq->sdno,
                rq,
                rq->bp->b_flags & B_READ ? "Read" : "Write",
                major(rq->bp->b_dev),
                minor(rq->bp->b_dev),
                rq->bp->b_blkno,
                rq->bp->b_bcount);
#endif
        return 0;                                           /* and get out of here */
    }
    rq->active = 0;                                         /* nothing yet */
#if VINUMDEBUG
    if (debug & DEBUG_ADDRESSES)
        log(LOG_DEBUG,
            "Request: %p\n%s dev %d.%d, offset 0x%x, length %ld\n",
            rq,
            rq->bp->b_flags & B_READ ? "Read" : "Write",
            major(rq->bp->b_dev),
            minor(rq->bp->b_dev),
            rq->bp->b_blkno,
            rq->bp->b_bcount);
    vinum_conf.lastrq = rq;
    vinum_conf.lastbuf = rq->bp;
    if (debug & DEBUG_LASTREQS)
        logrq(loginfo_user_bpl, (union rqinfou) rq->bp, rq->bp);
#endif

    /*
     * This loop happens without any participation
     * of the bottom half, so it requires no
     * protection.
     */
    for (rqg = rq->rqg; rqg != NULL; rqg = rqg->next) {     /* through the whole request chain */
        rqg->active = rqg->count;                           /* they're all active */
        for (rqno = 0; rqno < rqg->count; rqno++) {
            rqe = &rqg->rqe[rqno];
            if (rqe->flags & XFR_BAD_SUBDISK)               /* this subdisk is bad, */
                rqg->active--;                              /* one less active request */
        }
        if (rqg->active)                                    /* we have at least one active request, */
            rq->active++;                                   /* one more active request group */
    }

    /*
     * Now fire off the requests.  In this loop the
     * bottom half could be completing requests
     * before we finish, so we need critical section protection.
     */
    crit_enter();
    for (rqg = rq->rqg; rqg != NULL;) {                     /* through the whole request chain */
        if (rqg->lockbase >= 0)                             /* this rqg needs a lock first */
            rqg->lock = lockrange(rqg->lockbase, rqg->rq->bp, &PLEX[rqg->plexno]);
        rcount = rqg->count;
        for (rqno = 0; rqno < rcount;) {
            rqe = &rqg->rqe[rqno];

            /*
             * Point to next rqg before the bottom end
             * changes the structures.
             */
            if (++rqno >= rcount)
                rqg = rqg->next;
            if ((rqe->flags & XFR_BAD_SUBDISK) == 0) {      /* this subdisk is good, */
                drive = &DRIVE[rqe->driveno];               /* look at drive */
                drive->active++;
                if (drive->active >= drive->maxactive)
                    drive->maxactive = drive->active;
                vinum_conf.active++;
                if (vinum_conf.active >= vinum_conf.maxactive)
                    vinum_conf.maxactive = vinum_conf.active;

#ifdef VINUMDEBUG
                if (debug & DEBUG_ADDRESSES)
                    log(LOG_DEBUG,
                        " %s dev %d.%d, sd %d, offset 0x%x, devoffset 0x%x, length %ld\n",
                        rqe->b.b_flags & B_READ ? "Read" : "Write",
                        major(rqe->b.b_dev),
                        minor(rqe->b.b_dev),
                        rqe->sdno,
                        (u_int) (rqe->b.b_blkno - SD[rqe->sdno].driveoffset),
                        rqe->b.b_blkno,
                        rqe->b.b_bcount);
                if (debug & DEBUG_LASTREQS)
                    logrq(loginfo_rqe, (union rqinfou) rqe, rq->bp);
#endif
                /* fire off the request */
                BUF_STRATEGY(&rqe->b, 0);
            }
        }
    }
    crit_exit();
    return 0;
}

/*
 * define the low-level requests needed to perform a
 * high-level I/O operation for a specific plex 'plexno'.
 *
 * Return REQUEST_OK if all subdisks involved in the request are up,
 * REQUEST_DOWN if some subdisks are not up, and REQUEST_EOF if the
 * request is at least partially outside the bounds of the subdisks.
 *
 * Modify the pointer *diskstart to point to the end address.  On
 * read, return on the first bad subdisk, so that the caller
 * (build_read_request) can try alternatives.
 *
 * On entry to this routine, the rqg structures are not assigned.  The
 * assignment is performed by expandrq().  Strictly speaking, the
 * elements rqe->sdno of all entries should be set to -1, since 0
 * (from bzero) is a valid subdisk number.  We avoid this problem by
 * initializing the ones we use, and not looking at the others (index
 * >= rqg->requests).
 */
enum requeststatus
bre(struct request *rq,
    int plexno,
    daddr_t * diskaddr,
    daddr_t diskend)
{
    int sdno;
    struct sd *sd;
    struct rqgroup *rqg;
    struct buf *bp;                                         /* user's bp */
    struct plex *plex;
    enum requeststatus status;                              /* return value */
    daddr_t plexoffset;                                     /* offset of transfer in plex */
    daddr_t stripebase;                                     /* base address of stripe (1st subdisk) */
    daddr_t stripeoffset;                                   /* offset in stripe */
    daddr_t blockoffset;                                    /* offset in stripe on subdisk */
    struct rqelement *rqe;                                  /* point to this request information */
    daddr_t diskstart = *diskaddr;                          /* remember where this transfer starts */
    enum requeststatus s;                                   /* temp return value */

    bp = rq->bp;                                            /* buffer pointer */
    status = REQUEST_OK;                                    /* return value: OK until proven otherwise */
    plex = &PLEX[plexno];                                   /* point to the plex */

    switch (plex->organization) {
    case plex_concat:
        sd = NULL;                                          /* (keep compiler quiet) */
        for (sdno = 0; sdno < plex->subdisks; sdno++) {
            sd = &SD[plex->sdnos[sdno]];
            if (*diskaddr < sd->plexoffset)                 /* we must have a hole, */
                status = REQUEST_DEGRADED;                  /* note the fact */
            if (*diskaddr < (sd->plexoffset + sd->sectors)) { /* the request starts in this subdisk */
                rqg = allocrqg(rq, 1);                      /* space for the request */
                if (rqg == NULL) {                          /* malloc failed */
                    bp->b_error = ENOMEM;
                    bp->b_flags |= B_ERROR;
                    return REQUEST_ENOMEM;
                }
                rqg->plexno = plexno;

                rqe = &rqg->rqe[0];                         /* point to the element */
                rqe->rqg = rqg;                             /* group */
                rqe->sdno = sd->sdno;                       /* put in the subdisk number */
                plexoffset = *diskaddr;                     /* start offset in plex */
                rqe->sdoffset = plexoffset - sd->plexoffset; /* start offset in subdisk */
                rqe->useroffset = plexoffset - diskstart;   /* start offset in user buffer */
                rqe->dataoffset = 0;
                rqe->datalen = min(diskend - *diskaddr,     /* number of sectors to transfer in this sd */
                    sd->sectors - rqe->sdoffset);
                rqe->groupoffset = 0;                       /* no groups for concatenated plexes */
                rqe->grouplen = 0;
                rqe->buflen = rqe->datalen;                 /* buffer length is data buffer length */
                rqe->flags = 0;
                rqe->driveno = sd->driveno;
                if (sd->state != sd_up) {                   /* *now* we find the sd is down */
                    s = checksdstate(sd, rq, *diskaddr, diskend); /* do we need to change state? */
                    if (s == REQUEST_DOWN) {                /* down? */
                        rqe->flags = XFR_BAD_SUBDISK;       /* yup */
                        if (rq->bp->b_flags & B_READ)       /* read request, */
                            return REQUEST_DEGRADED;        /* give up here */
                        /*
                         * If we're writing, don't give up
                         * because of a bad subdisk.  Go
                         * through to the bitter end, but note
                         * which ones we can't access.
                         */
                        status = REQUEST_DEGRADED;          /* can't do it all */
                    }
                }
                *diskaddr += rqe->datalen;                  /* bump the address */
                if (build_rq_buffer(rqe, plex)) {           /* build the buffer */
                    deallocrqg(rqg);
                    bp->b_error = ENOMEM;
                    bp->b_flags |= B_ERROR;
                    return REQUEST_ENOMEM;                  /* can't do it */
                }
            }
            if (*diskaddr == diskend)                       /* we're finished, */
                break;                                      /* get out of here */
        }
        /*
         * We've got to the end of the plex.  Have we got to the end of
         * the transfer?  It would seem that having an offset beyond the
         * end of the subdisk is an error, but in fact it can happen if
         * the volume has another plex of different size.  There's a valid
         * question as to why you would want to do this, but currently
         * it's allowed.
         *
         * In a previous version, I returned REQUEST_DOWN here.  I think
         * REQUEST_EOF is more appropriate now.
         */
        if (diskend > sd->sectors + sd->plexoffset)         /* pointing beyond EOF? */
            status = REQUEST_EOF;
        break;

    case plex_striped:
        {
            while (*diskaddr < diskend) {                   /* until we get it all sorted out */
                if (*diskaddr >= plex->length)              /* beyond the end of the plex */
                    return REQUEST_EOF;                     /* can't continue */

                /* The offset of the start address from the start of the stripe. */
                stripeoffset = *diskaddr % (plex->stripesize * plex->subdisks);

                /* The plex-relative address of the start of the stripe. */
                stripebase = *diskaddr - stripeoffset;

                /* The number of the subdisk in which the start is located. */
                sdno = stripeoffset / plex->stripesize;

                /* The offset from the beginning of the stripe on this subdisk. */
                blockoffset = stripeoffset % plex->stripesize;
                sd = &SD[plex->sdnos[sdno]];                /* the subdisk in question */
                rqg = allocrqg(rq, 1);                      /* space for the request */
                if (rqg == NULL) {                          /* malloc failed */
                    bp->b_error = ENOMEM;
                    bp->b_flags |= B_ERROR;
                    return REQUEST_ENOMEM;
                }
                rqg->plexno = plexno;

                rqe = &rqg->rqe[0];                         /* point to the element */
                rqe->rqg = rqg;
                rqe->sdoffset = stripebase / plex->subdisks + blockoffset; /* start offset in this subdisk */
                rqe->useroffset = *diskaddr - diskstart;    /* The offset of the start in the user buffer */
                rqe->dataoffset = 0;
                rqe->datalen = min(diskend - *diskaddr,     /* the amount remaining to transfer */
                    plex->stripesize - blockoffset);        /* and the amount left in this stripe */
                rqe->groupoffset = 0;                       /* no groups for striped plexes */
                rqe->grouplen = 0;
                rqe->buflen = rqe->datalen;                 /* buffer length is data buffer length */
                rqe->flags = 0;
                rqe->sdno = sd->sdno;                       /* put in the subdisk number */
                rqe->driveno = sd->driveno;

                if (sd->state != sd_up) {                   /* *now* we find the sd is down */
                    s = checksdstate(sd, rq, *diskaddr, diskend); /* do we need to change state? */
                    if (s == REQUEST_DOWN) {                /* down? */
                        rqe->flags = XFR_BAD_SUBDISK;       /* yup */
                        if (rq->bp->b_flags & B_READ)       /* read request, */
                            return REQUEST_DEGRADED;        /* give up here */
                        /*
                         * If we're writing, don't give up
                         * because of a bad subdisk.  Go through
                         * to the bitter end, but note which
                         * ones we can't access.
                         */
                        status = REQUEST_DEGRADED;          /* can't do it all */
                    }
                }
                /*
                 * It would seem that having an offset
                 * beyond the end of the subdisk is an
                 * error, but in fact it can happen if the
                 * volume has another plex of different
                 * size.  There's a valid question as to why
                 * you would want to do this, but currently
                 * it's allowed.
                 */
                if (rqe->sdoffset + rqe->datalen > sd->sectors) { /* ends beyond the end of the subdisk? */
                    rqe->datalen = sd->sectors - rqe->sdoffset; /* truncate */
#if VINUMDEBUG
                    if (debug & DEBUG_EOFINFO) {            /* tell on the request */
                        log(LOG_DEBUG,
                            "vinum: EOF on plex %s, sd %s offset %x (user offset %x)\n",
                            plex->name,
                            sd->name,
                            (u_int) sd->sectors,
                            bp->b_blkno);
                        log(LOG_DEBUG,
                            "vinum: stripebase %x, stripeoffset %x, blockoffset %x\n",
                            stripebase,
                            stripeoffset,
                            blockoffset);
                    }
#endif
                }
                if (build_rq_buffer(rqe, plex)) {           /* build the buffer */
                    deallocrqg(rqg);
                    bp->b_error = ENOMEM;
                    bp->b_flags |= B_ERROR;
                    return REQUEST_ENOMEM;                  /* can't do it */
                }
                *diskaddr += rqe->datalen;                  /* look at the remainder */
                if ((*diskaddr < diskend)                   /* didn't finish the request on this stripe */
                    && (*diskaddr < plex->length)) {        /* and there's more to come */
                    plex->multiblock++;                     /* count another one */
                    if (sdno == plex->subdisks - 1)         /* last subdisk, */
                        plex->multistripe++;                /* another stripe as well */
                }
            }
        }
        break;

        /*
         * RAID-4 and RAID-5 are complicated enough to have their own
         * function.
         */
    case plex_raid4:
    case plex_raid5:
        status = bre5(rq, plexno, diskaddr, diskend);
        break;

    default:
        log(LOG_ERR, "vinum: invalid plex type %d in bre\n", plex->organization);
        status = REQUEST_DOWN;                              /* can't access it */
    }

    return status;
}

/*
 * Build up a request structure for reading volumes.
 * This function is not needed for plex reads, since there's
 * no recovery if a plex read can't be satisfied.
 */
enum requeststatus
build_read_request(struct request *rq,                      /* request */
    int plexindex)
{                                                           /* index in the volume's plex table */
    struct buf *bp;
    daddr_t startaddr;                                      /* offset of previous part of transfer */
    daddr_t diskaddr;                                       /* offset of current part of transfer */
    daddr_t diskend;                                        /* and end offset of transfer */
    int plexno;                                             /* plex index in vinum_conf */
    struct rqgroup *rqg;                                    /* point to the request we're working on */
    struct volume *vol;                                     /* volume in question */
    int recovered = 0;                                      /* set if we recover a read */
    enum requeststatus status = REQUEST_OK;
    int plexmask;                                           /* bit mask of plexes, for recovery */

    bp = rq->bp;                                            /* buffer pointer */
    diskaddr = bp->b_blkno;                                 /* start offset of transfer */
    diskend = diskaddr + (bp->b_bcount / DEV_BSIZE);        /* and end offset of transfer */
    rqg = &rq->rqg[plexindex];                              /* plex request */
    vol = &VOL[rq->volplex.volno];                          /* point to volume */

    while (diskaddr < diskend) {                            /* build up request components */
        startaddr = diskaddr;
        status = bre(rq, vol->plex[plexindex], &diskaddr, diskend); /* build up a request */
        switch (status) {
        case REQUEST_OK:
            continue;

        case REQUEST_RECOVERED:
            /*
             * XXX FIXME if we have more than one plex, and we can
             * satisfy the request from another, don't use the
             * recovered request, since it's more expensive.
             */
            recovered = 1;
            break;

        case REQUEST_ENOMEM:
            return status;
            /*
             * If we get here, our request is not complete.  Try
             * to fill in the missing parts from another plex.
             * This can happen multiple times in this function,
             * and we reinitialize the plex mask each time, since
             * we could have a hole in our plexes.
             */
        case REQUEST_EOF:
        case REQUEST_DOWN:                                  /* can't access the plex */
        case REQUEST_DEGRADED:                              /* can't access the plex */
            plexmask = ((1 << vol->plexes) - 1)             /* all plexes in the volume */
                & ~(1 << plexindex);                        /* except for the one we were looking at */
            for (plexno = 0; plexno < vol->plexes; plexno++) {
                if (plexmask == 0)                          /* no plexes left to try */
                    return REQUEST_DOWN;                    /* failed */
                diskaddr = startaddr;                       /* start at the beginning again */
                if (plexmask & (1 << plexno)) {             /* we haven't tried this plex yet */
                    bre(rq, vol->plex[plexno], &diskaddr, diskend); /* try a request */
                    if (diskaddr > startaddr) {             /* we satisfied another part */
                        recovered = 1;                      /* we recovered from the problem */
                        status = REQUEST_OK;                /* don't complain about it */
                        break;
                    }
                }
            }
            if (diskaddr == startaddr)                      /* didn't get any further, */
                return status;
        }
        if (recovered)
            vol->recovered_reads += recovered;              /* adjust our recovery count */
    }
    return status;
}

/*
 * Build up a request structure for writes.
 * Return REQUEST_OK if all subdisks involved in the request are up,
 * a degraded status if some subdisks are not up, and REQUEST_EOF if
 * the request is at least partially outside the bounds of the subdisks.
 */
enum requeststatus
build_write_request(struct request *rq)
{                                                           /* request */
    struct buf *bp;
    daddr_t diskstart;                                      /* offset of current part of transfer */
    daddr_t diskend;                                        /* and end offset of transfer */
    int plexno;                                             /* plex index in vinum_conf */
    struct volume *vol;                                     /* volume in question */
    enum requeststatus status;

    bp = rq->bp;                                            /* buffer pointer */
    vol = &VOL[rq->volplex.volno];                          /* point to volume */
    diskend = bp->b_blkno + (bp->b_bcount / DEV_BSIZE);     /* end offset of transfer */
    status = REQUEST_DOWN;                                  /* assume the worst */
    for (plexno = 0; plexno < vol->plexes; plexno++) {
        diskstart = bp->b_blkno;                            /* start offset of transfer */
        /*
         * Build requests for the plex.
         * We take the best possible result here (min,
         * not max): we're happy if we can write at all
         */
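        /*
         * Note that this relies on the values of enum requeststatus
         * being ordered from best (REQUEST_OK) to worst, so that min()
         * keeps the most successful of the per-plex results.
         */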
        status = min(status, bre(rq,
                vol->plex[plexno],
                &diskstart,
                diskend));
    }
    return status;
}

/* Fill in the struct buf part of a request element. */
enum requeststatus
build_rq_buffer(struct rqelement *rqe, struct plex *plex)
{
    struct sd *sd;                                          /* point to subdisk */
    struct volume *vol;
    struct buf *bp;
    struct buf *ubp;                                        /* user (high level) buffer header */

    vol = &VOL[rqe->rqg->rq->volplex.volno];
    sd = &SD[rqe->sdno];                                    /* point to subdisk */
    bp = &rqe->b;
    ubp = rqe->rqg->rq->bp;                                 /* pointer to user buffer header */

    /* Initialize the buf struct */
    /* copy these flags from user bp */
    bp->b_flags = ubp->b_flags & (B_ORDERED | B_NOCACHE | B_READ | B_ASYNC);
#ifdef VINUMDEBUG
    if (rqe->flags & XFR_BUFLOCKED)                         /* paranoia */
        panic("build_rq_buffer: rqe already locked");       /* XXX remove this when we're sure */
#endif
    BUF_LOCKINIT(bp);                                       /* get a lock for the buffer */
    BUF_LOCK(bp, LK_EXCLUSIVE);                             /* and lock it */
    BUF_KERNPROC(bp);
    rqe->flags |= XFR_BUFLOCKED;
    bp->b_iodone = complete_rqe;
    /*
     * You'd think that we wouldn't need to even
     * build the request buffer for a dead subdisk,
     * but in some cases we need information like
     * the user buffer address.  Err on the side of
     * generosity and supply what we can.  That
     * obviously doesn't include drive information
     * when the drive is dead.
     */
    if ((rqe->flags & XFR_BAD_SUBDISK) == 0)                /* subdisk is accessible, */
        bp->b_dev = DRIVE[rqe->driveno].dev;                /* drive device */
    bp->b_blkno = rqe->sdoffset + sd->driveoffset;          /* start address */
    bp->b_bcount = rqe->buflen << DEV_BSHIFT;               /* number of bytes to transfer */
    bp->b_resid = bp->b_bcount;                             /* and it's still all waiting */
    bp->b_bufsize = bp->b_bcount;                           /* and buffer size */

    if (rqe->flags & XFR_MALLOCED) {                        /* this operation requires a malloced buffer */
        bp->b_data = Malloc(bp->b_bcount);                  /* get a buffer to put it in */
        if (bp->b_data == NULL) {                           /* failed */
            abortrequest(rqe->rqg->rq, ENOMEM);
            return REQUEST_ENOMEM;                          /* no memory */
        }
    } else
        /*
         * Point directly to user buffer data.  This means
         * that we don't need to do anything when we have
         * finished the transfer
         */
        bp->b_data = ubp->b_data + rqe->useroffset * DEV_BSIZE;
    /*
     * On a recovery read, we perform an XOR of
     * all blocks to the user buffer.  To make
     * this work, we first clean out the buffer
     */
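    /*
     * Zeroing works because zero is the identity for XOR: starting
     * from a cleared buffer, XORing in the blocks of the surviving
     * subdisks (including parity) leaves the reconstructed contents
     * of the inaccessible subdisk.
     */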
    if ((rqe->flags & (XFR_RECOVERY_READ | XFR_BAD_SUBDISK))
        == (XFR_RECOVERY_READ | XFR_BAD_SUBDISK)) {         /* bad subdisk of a recovery read */
        int length = rqe->grouplen << DEV_BSHIFT;           /* and count involved */
        char *data = (char *) &rqe->b.b_data[rqe->groupoffset << DEV_BSHIFT]; /* destination */

        bzero(data, length);                                /* clean it out */
    }
    return 0;
}

/*
 * Abort a request: free resources and complete the
 * user request with the specified error
 */
int
abortrequest(struct request *rq, int error)
{
    struct buf *bp = rq->bp;                                /* user buffer */

    bp->b_error = error;
    freerq(rq);                                             /* free everything we're doing */
    bp->b_flags |= B_ERROR;
    return error;                                           /* and give up */
}

/*
 * Check that our transfer will cover the
 * complete address space of the user request.
 *
 * Return 1 if it can, otherwise 0
 */
int
check_range_covered(struct request *rq)
{
    return 1;
}

/* Perform I/O on a subdisk */
void
sdio(struct buf *bp)
{
    struct sd *sd;
    struct sdbuf *sbp;
    daddr_t endoffset;
    struct drive *drive;

#if VINUMDEBUG
    if (debug & DEBUG_LASTREQS)
        logrq(loginfo_sdio, (union rqinfou) bp, bp);
#endif
    sd = &SD[Sdno(bp->b_dev)];                              /* point to the subdisk */
    drive = &DRIVE[sd->driveno];

    if (drive->state != drive_up) {
        if (sd->state >= sd_crashed) {
            if ((bp->b_flags & B_READ) == 0)                /* writing, */
                set_sd_state(sd->sdno, sd_stale, setstate_force);
            else
                set_sd_state(sd->sdno, sd_crashed, setstate_force);
        }
        bp->b_error = EIO;
        bp->b_flags |= B_ERROR;
        biodone(bp);
        return;
    }
    /*
     * We allow access to any kind of subdisk as long as we can expect
     * to get the I/O performed.
     */
    if (sd->state < sd_empty) {                             /* nothing to talk to, */
        bp->b_error = EIO;
        bp->b_flags |= B_ERROR;
        biodone(bp);
        return;
    }
    /* Get a buffer */
    sbp = (struct sdbuf *) Malloc(sizeof(struct sdbuf));
    if (sbp == NULL) {
        bp->b_error = ENOMEM;
        bp->b_flags |= B_ERROR;
        biodone(bp);
        return;
    }
    bzero(sbp, sizeof(struct sdbuf));                       /* start with nothing */
    sbp->b.b_flags = bp->b_flags;
    sbp->b.b_bufsize = bp->b_bufsize;                       /* buffer size */
    sbp->b.b_bcount = bp->b_bcount;                         /* number of bytes to transfer */
    sbp->b.b_resid = bp->b_resid;                           /* and amount waiting */
    sbp->b.b_dev = DRIVE[sd->driveno].dev;                  /* device */
    sbp->b.b_data = bp->b_data;                             /* data buffer */
    sbp->b.b_blkno = bp->b_blkno + sd->driveoffset;
    sbp->b.b_iodone = sdio_done;                            /* come here on completion */
    BUF_LOCKINIT(&sbp->b);                                  /* get a lock for the buffer */
    BUF_LOCK(&sbp->b, LK_EXCLUSIVE);                        /* and lock it */
    BUF_KERNPROC(&sbp->b);
    sbp->bp = bp;                                           /* note the address of the original header */
    sbp->sdno = sd->sdno;                                   /* note for statistics */
    sbp->driveno = sd->driveno;
    endoffset = bp->b_blkno + sbp->b.b_bcount / DEV_BSIZE;  /* final sector offset */
    if (endoffset > sd->sectors) {                          /* beyond the end */
        sbp->b.b_bcount -= (endoffset - sd->sectors) * DEV_BSIZE; /* trim */
        if (sbp->b.b_bcount <= 0) {                         /* nothing to transfer */
            bp->b_resid = bp->b_bcount;                     /* nothing transferred */
            biodone(bp);
            BUF_UNLOCK(&sbp->b);
            BUF_LOCKFREE(&sbp->b);
            Free(sbp);
            return;
        }
    }
#if VINUMDEBUG
    if (debug & DEBUG_ADDRESSES)
        log(LOG_DEBUG,
            " %s dev %d.%d, sd %d, offset 0x%x, devoffset 0x%x, length %ld\n",
            sbp->b.b_flags & B_READ ? "Read" : "Write",
            major(sbp->b.b_dev),
            minor(sbp->b.b_dev),
            sbp->sdno,
            (u_int) (sbp->b.b_blkno - SD[sbp->sdno].driveoffset),
            (int) sbp->b.b_blkno,
            sbp->b.b_bcount);
#endif
    crit_enter();
#if VINUMDEBUG
    if (debug & DEBUG_LASTREQS)
        logrq(loginfo_sdiol, (union rqinfou) &sbp->b, &sbp->b);
#endif
    BUF_STRATEGY(&sbp->b, 0);
    crit_exit();
}

/*
 * Simplified version of bounds_check_with_label
 * Determine the size of the transfer, and make sure it is
 * within the boundaries of the partition.  Adjust transfer
 * if needed, and signal errors or early completion.
 *
 * Volumes are simpler than disk slices: they only contain
 * one component (though we call them a, b and c to make
 * system utilities happy), and they always take up the
 * complete space of the "partition".
 *
 * I'm still not happy with this: why should the label be
 * protected?  If it weren't so damned difficult to write
 * one in the first place (because it's protected), it wouldn't
 * be a problem.
 */
int
vinum_bounds_check(struct buf *bp, struct volume *vol)
{
    int maxsize = vol->size;                                /* size of the partition (sectors) */
    int size = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT; /* size of this request (sectors) */

    /* Would this transfer overwrite the disk label? */
    if (bp->b_blkno <= LABELSECTOR                          /* starts before or at the label */
#if LABELSECTOR != 0
        && bp->b_blkno + size > LABELSECTOR                 /* and finishes after */
#endif
        && (!(vol->flags & VF_RAW))                         /* and it's not raw */
        && ((bp->b_flags & B_READ) == 0)                    /* and it's a write */
        && (!(vol->flags & (VF_WLABEL | VF_LABELLING)))) {  /* and we're not allowed to write the label */
        bp->b_error = EROFS;                                /* read-only */
        bp->b_flags |= B_ERROR;
        return -1;
    }
    if (size == 0)                                          /* no transfer specified, */
        return 0;                                           /* treat as EOF */
    /* beyond partition? */
    if (bp->b_blkno < 0                                     /* negative start */
        || bp->b_blkno + size > maxsize) {                  /* or goes beyond the end of the partition */
        /* if exactly at end of disk, return an EOF */
        if (bp->b_blkno == maxsize) {
            bp->b_resid = bp->b_bcount;
            return 0;
        }
        /* or truncate if part of it fits */
        size = maxsize - bp->b_blkno;
        if (size <= 0) {                                    /* nothing to transfer */
            bp->b_error = EINVAL;
            bp->b_flags |= B_ERROR;
            return -1;
        }
        bp->b_bcount = size << DEV_BSHIFT;
    }
    bp->b_pblkno = bp->b_blkno;
    return 1;
}

/*
 * Allocate a request group and hook
 * it into the list for rq
 */
struct rqgroup *
allocrqg(struct request *rq, int elements)
{
    struct rqgroup *rqg;                                    /* the one we're going to allocate */
    int size = sizeof(struct rqgroup) + elements * sizeof(struct rqelement);

    rqg = (struct rqgroup *) Malloc(size);
    if (rqg != NULL) {                                      /* malloc OK, */
        if (rq->rqg)                                        /* we already have requests */
            rq->lrqg->next = rqg;                           /* hang it off the end */
        else                                                /* first request */
            rq->rqg = rqg;                                  /* at the start */
        rq->lrqg = rqg;                                     /* this one is the last in the list */

        bzero(rqg, size);                                   /* no old junk */
        rqg->rq = rq;                                       /* point back to the parent request */
        rqg->count = elements;                              /* number of requests in the group */
        rqg->lockbase = -1;                                 /* no lock required yet */
    }
    return rqg;
}

/*
 * Deallocate a request group out of a chain.  We do
 * this by linear search: the chain is short, this
 * almost never happens, and currently it can only
 * happen to the first member of the chain.
 */
void
deallocrqg(struct rqgroup *rqg)
{
    struct rqgroup *rqgc = rqg->rq->rqg;                    /* point to the request chain */

    if (rqg->lock)                                          /* got a lock? */
        unlockrange(rqg->plexno, rqg->lock);                /* yes, free it */
    if (rqgc == rqg)                                        /* we're first in line */
        rqg->rq->rqg = rqg->next;                           /* unhook ourselves */
    else {
        while ((rqgc->next != NULL)                         /* find the group */
            && (rqgc->next != rqg))
            rqgc = rqgc->next;
        if (rqgc->next == NULL)
            log(LOG_ERR,
                "vinum deallocrqg: rqg %p not found in request %p\n",
                rqg,
                rqg->rq);
        else
            rqgc->next = rqg->next;                         /* make the chain jump over us */
    }
    Free(rqg);
}

/* Local Variables: */
/* fill-column: 50 */
/* End: */