1 /*- 2 * Copyright (c) 1997, 1998, 1999 3 * Nan Yang Computer Services Limited. All rights reserved. 4 * 5 * Parts copyright (c) 1997, 1998 Cybernet Corporation, NetMAX project. 6 * 7 * Written by Greg Lehey 8 * 9 * This software is distributed under the so-called ``Berkeley 10 * License'': 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by Nan Yang Computer 23 * Services Limited. 24 * 4. Neither the name of the Company nor the names of its contributors 25 * may be used to endorse or promote products derived from this software 26 * without specific prior written permission. 27 * 28 * This software is provided ``as is'', and any express or implied 29 * warranties, including, but not limited to, the implied warranties of 30 * merchantability and fitness for a particular purpose are disclaimed. 31 * In no event shall the company or contributors be liable for any 32 * direct, indirect, incidental, special, exemplary, or consequential 33 * damages (including, but not limited to, procurement of substitute 34 * goods or services; loss of use, data, or profits; or business 35 * interruption) however caused and on any theory of liability, whether 36 * in contract, strict liability, or tort (including negligence or 37 * otherwise) arising in any way out of the use of this software, even if 38 * advised of the possibility of such damage. 
39 * 40 * $Id: vinumrevive.c,v 1.14 2000/12/21 01:55:11 grog Exp grog $ 41 * $FreeBSD: src/sys/dev/vinum/vinumrevive.c,v 1.22.2.5 2001/03/13 02:59:43 grog Exp $ 42 * $DragonFly: src/sys/dev/raid/vinum/vinumrevive.c,v 1.10 2006/04/30 17:22:17 dillon Exp $ 43 */ 44 45 #include "vinumhdr.h" 46 #include "request.h" 47 48 /* 49 * Revive a block of a subdisk. Return an error 50 * indication. EAGAIN means successful copy, but 51 * that more blocks remain to be copied. EINVAL 52 * means that the subdisk isn't associated with a 53 * plex (which means a programming error if we get 54 * here at all; FIXME). 55 */ 56 57 int 58 revive_block(int sdno) 59 { 60 struct sd *sd; 61 struct plex *plex; 62 struct volume *vol; 63 struct buf *bp; 64 dev_t dev; 65 int error = EAGAIN; 66 int size; /* size of revive block, bytes */ 67 daddr_t plexblkno; /* lblkno in plex */ 68 int psd; /* parity subdisk number */ 69 u_int64_t stripe; /* stripe number */ 70 int paritysd = 0; /* set if this is the parity stripe */ 71 struct rangelock *lock; /* for locking */ 72 daddr_t stripeoffset; /* offset in stripe */ 73 74 plexblkno = 0; /* to keep the compiler happy */ 75 sd = &SD[sdno]; 76 lock = NULL; 77 if (sd->plexno < 0) /* no plex? 
*/ 78 return EINVAL; 79 plex = &PLEX[sd->plexno]; /* point to plex */ 80 if (plex->volno >= 0) 81 vol = &VOL[plex->volno]; 82 else 83 vol = NULL; 84 85 if ((sd->revive_blocksize == 0) /* no block size */ 86 ||(sd->revive_blocksize & ((1 << DEV_BSHIFT) - 1))) /* or invalid block size */ 87 sd->revive_blocksize = DEFAULT_REVIVE_BLOCKSIZE; 88 else if (sd->revive_blocksize > MAX_REVIVE_BLOCKSIZE) 89 sd->revive_blocksize = MAX_REVIVE_BLOCKSIZE; 90 size = min(sd->revive_blocksize >> DEV_BSHIFT, sd->sectors - sd->revived) << DEV_BSHIFT; 91 sd->reviver = curproc->p_pid; /* note who last had a bash at it */ 92 93 /* Now decide where to read from */ 94 switch (plex->organization) { 95 case plex_concat: 96 plexblkno = sd->revived + sd->plexoffset; /* corresponding address in plex */ 97 break; 98 99 case plex_striped: 100 stripeoffset = sd->revived % plex->stripesize; /* offset from beginning of stripe */ 101 if (stripeoffset + (size >> DEV_BSHIFT) > plex->stripesize) 102 size = (plex->stripesize - stripeoffset) << DEV_BSHIFT; 103 plexblkno = sd->plexoffset /* base */ 104 + (sd->revived - stripeoffset) * plex->subdisks /* offset to beginning of stripe */ 105 + stripeoffset; /* offset from beginning of stripe */ 106 break; 107 108 case plex_raid4: 109 case plex_raid5: 110 stripeoffset = sd->revived % plex->stripesize; /* offset from beginning of stripe */ 111 plexblkno = sd->plexoffset /* base */ 112 + (sd->revived - stripeoffset) * (plex->subdisks - 1) /* offset to beginning of stripe */ 113 +stripeoffset; /* offset from beginning of stripe */ 114 stripe = (sd->revived / plex->stripesize); /* stripe number */ 115 116 /* Make sure we don't go beyond the end of the band. 
*/ 117 size = min(size, (plex->stripesize - stripeoffset) << DEV_BSHIFT); 118 if (plex->organization == plex_raid4) 119 psd = plex->subdisks - 1; /* parity subdisk for this stripe */ 120 else 121 psd = plex->subdisks - 1 - stripe % plex->subdisks; /* parity subdisk for this stripe */ 122 paritysd = plex->sdnos[psd] == sdno; /* note if it's the parity subdisk */ 123 124 /* 125 * Now adjust for the strangenesses 126 * in RAID-4 and RAID-5 striping. 127 */ 128 if (sd->plexsdno > psd) /* beyond the parity stripe, */ 129 plexblkno -= plex->stripesize; /* one stripe less */ 130 else if (paritysd) 131 plexblkno -= plex->stripesize * sd->plexsdno; /* go back to the beginning of the band */ 132 break; 133 134 case plex_disorg: /* to keep the compiler happy */ 135 break; 136 } 137 138 if (paritysd) { /* we're reviving a parity block, */ 139 bp = parityrebuild(plex, sd->revived, size, rebuildparity, &lock, NULL); /* do the grunt work */ 140 if (bp == NULL) /* no buffer space */ 141 return ENOMEM; /* chicken out */ 142 } else { /* data block */ 143 crit_enter(); 144 bp = geteblk(size); /* Get a buffer */ 145 crit_exit(); 146 if (bp == NULL) 147 return ENOMEM; 148 149 /* 150 * Amount to transfer: block size, unless it 151 * would overlap the end. 152 */ 153 bp->b_bcount = size; 154 bp->b_resid = bp->b_bcount; 155 bp->b_bio1.bio_offset = (off_t)plexblkno << DEV_BSHIFT; /* start here */ 156 if (isstriped(plex)) /* we need to lock striped plexes */ 157 lock = lockrange(plexblkno << DEV_BSHIFT, bp, plex); /* lock it */ 158 if (vol != NULL) /* it's part of a volume, */ 159 /* 160 * First, read the data from the volume. We 161 * don't care which plex, that's bre's job. 
162 */ 163 dev = VINUMDEV(plex->volno, 0, 0, VINUM_VOLUME_TYPE); /* create the device number */ 164 else /* it's an unattached plex */ 165 dev = VINUM_PLEX(sd->plexno); /* create the device number */ 166 167 bp->b_flags = B_PAGING; /* either way, read it */ 168 bp->b_cmd = BUF_CMD_READ; 169 vinumstart(dev, &bp->b_bio1, 1); 170 biowait(bp); 171 } 172 173 if (bp->b_flags & B_ERROR) 174 error = bp->b_error; 175 else 176 /* Now write to the subdisk */ 177 { 178 dev = VINUM_SD(sdno); /* create the device number */ 179 bp->b_flags = B_ORDERED | B_PAGING; /* and make this an ordered write */ 180 bp->b_cmd = BUF_CMD_WRITE; 181 bp->b_resid = bp->b_bcount; 182 bp->b_bio1.bio_offset = (off_t)sd->revived << DEV_BSHIFT; /* write it to here */ 183 bp->b_bio1.bio_driver_info = dev; 184 sdio(&bp->b_bio1); /* perform the I/O */ 185 biowait(bp); 186 if (bp->b_flags & B_ERROR) 187 error = bp->b_error; 188 else { 189 sd->revived += bp->b_bcount >> DEV_BSHIFT; /* moved this much further down */ 190 if (sd->revived >= sd->sectors) { /* finished */ 191 sd->revived = 0; 192 set_sd_state(sdno, sd_up, setstate_force); /* bring the sd up */ 193 log(LOG_INFO, "vinum: %s is %s\n", sd->name, sd_state(sd->state)); 194 save_config(); /* and save the updated configuration */ 195 error = 0; /* we're done */ 196 } 197 } 198 if (lock) /* we took a lock, */ 199 unlockrange(sd->plexno, lock); /* give it back */ 200 while (sd->waitlist) { /* we have waiting requests */ 201 #if VINUMDEBUG 202 struct request *rq = sd->waitlist; 203 dev_t dev; 204 205 if (debug & DEBUG_REVIVECONFLICT) { 206 dev = rq->bio->bio_driver_info; 207 log(LOG_DEBUG, 208 "Relaunch revive conflict sd %d: %p\n%s dev %d.%d, offset 0x%llx, length %d\n", 209 rq->sdno, 210 rq, 211 (rq->bio->bio_buf->b_cmd == BUF_CMD_READ) ? 
"Read" : "Write", 212 major(dev), 213 minor(dev), 214 rq->bio->bio_offset, 215 rq->bio->bio_buf->b_bcount); 216 } 217 #endif 218 launch_requests(sd->waitlist, 1); /* do them now */ 219 sd->waitlist = sd->waitlist->next; /* and move on to the next */ 220 } 221 } 222 if (bp->b_qindex == 0) { /* not on a queue, */ 223 bp->b_flags |= B_INVAL; 224 bp->b_flags &= ~B_ERROR; 225 brelse(bp); /* is this kosher? */ 226 } 227 return error; 228 } 229 230 /* 231 * Check or rebuild the parity blocks of a RAID-4 232 * or RAID-5 plex. 233 * 234 * The variables plex->checkblock and 235 * plex->rebuildblock represent the 236 * subdisk-relative address of the stripe we're 237 * looking at, not the plex-relative address. We 238 * store it in the plex and not as a local 239 * variable because this function could be 240 * stopped, and we don't want to repeat the part 241 * we've already done. This is also the reason 242 * why we don't initialize it here except at the 243 * end. It gets initialized with the plex on 244 * creation. 245 * 246 * Each call to this function processes at most 247 * one stripe. We can't loop in this function, 248 * because we're unstoppable, so we have to be 249 * called repeatedly from userland. 
 */
void
parityops(struct vinum_ioctl_msg *data)
{
    int plexno;
    struct plex *plex;
    int size;						    /* I/O transfer size, bytes */
    int stripe;						    /* stripe number in plex */
    int psd;						    /* parity subdisk number (computed but unused here;
							     * parityrebuild derives it again itself) */
    struct rangelock *lock;				    /* lock on stripe, taken by parityrebuild */
    struct _ioctl_reply *reply;
    off_t pstripe;					    /* pointer to our stripe counter */
    struct buf *pbp;					    /* parity buffer returned by parityrebuild */
    off_t errorloc;					    /* offset of parity error, -1 if none */
    enum parityop op;					    /* operation to perform */

    plexno = data->index;
    op = data->op;
    pbp = NULL;
    /* The reply is written over the top of the request message. */
    reply = (struct _ioctl_reply *) data;
    reply->error = EAGAIN;				    /* expect to repeat this call */
    plex = &PLEX[plexno];
    if (!isparity(plex)) {				    /* not RAID-4 or RAID-5 */
	reply->error = EINVAL;
	return;
    } else if (plex->state < plex_flaky) {		    /* must be at least flaky to touch parity */
	reply->error = EIO;
	strcpy(reply->msg, "Plex is not completely accessible\n");
	return;
    }
    /*
     * data->offset is the subdisk-relative sector to work on.
     * NOTE(review): stripe is an int while pstripe is off_t —
     * presumably fine for supported plex sizes, but worth
     * confirming for very large plexes.
     */
    pstripe = data->offset;
    stripe = pstripe / plex->stripesize;		    /* stripe number */
    psd = plex->subdisks - 1 - stripe % plex->subdisks;	    /* parity subdisk for this stripe */
    size = min(DEFAULT_REVIVE_BLOCKSIZE,		    /* one block at a time */
	plex->stripesize << DEV_BSHIFT);

    pbp = parityrebuild(plex, pstripe, size, op, &lock, &errorloc); /* do the grunt work */
    if (pbp == NULL) {					    /* no buffer space */
	reply->error = ENOMEM;
	return;						    /* chicken out */
    }
    /*
     * Now we have a result in the data buffer of
     * the parity buffer header, which we have kept.
     * Decide what to do with it.
     */
    reply->msg[0] = '\0';				    /* until shown otherwise */
    if ((pbp->b_flags & B_ERROR) == 0) {		    /* no error */
	if ((op == rebuildparity)
	    || (op == rebuildandcheckparity)) {
	    /* Write the recalculated parity back to the parity subdisk. */
	    pbp->b_cmd = BUF_CMD_WRITE;
	    pbp->b_resid = pbp->b_bcount;
	    sdio(&pbp->b_bio1);				    /* write the parity block */
	    biowait(pbp);
	}
	if (((op == checkparity)
		|| (op == rebuildandcheckparity))
	    && (errorloc != -1)) {			    /* parityrebuild found a mismatch */
	    if (op == checkparity)
		reply->error = EIO;			    /* check-only: report it as an I/O error */
	    sprintf(reply->msg,
		"Parity incorrect at offset 0x%llx\n",
		errorloc);
	}
	if (reply->error == EAGAIN) {			    /* still OK, */
	    /* Advance the persistent per-plex progress counter. */
	    plex->checkblock = pstripe + (pbp->b_bcount >> DEV_BSHIFT); /* moved this much further down */
	    if (plex->checkblock >= SD[plex->sdnos[0]].sectors) { /* finished */
		plex->checkblock = 0;
		reply->error = 0;
	    }
	}
    }
    if (pbp->b_flags & B_ERROR)
	reply->error = pbp->b_error;
    /* Release the parity buffer and the stripe lock taken by parityrebuild. */
    pbp->b_flags |= B_INVAL;
    pbp->b_flags &= ~B_ERROR;
    brelse(pbp);
    unlockrange(plexno, lock);
}

/*
 * Rebuild a parity stripe.  Return pointer to
 * parity bp.  On return,
 *
 * 1.  The band is locked.  The caller must unlock
 *     the band and release the buffer header.
 *
 * 2.  All buffer headers except php have been
 *     released.  The caller must release pbp.
 *
 * 3.  For checkparity and rebuildandcheckparity,
 *     the parity is compared with the current
 *     parity block.  If it's different, the
 *     offset of the error is returned to
 *     errorloc.  The caller can set the value of
 *     the pointer to NULL if this is called for
 *     rebuilding parity.
 *
 * pstripe is the subdisk-relative base address of
 * the data to be reconstructed, size is the size
 * of the transfer in bytes.
 */
struct buf *
parityrebuild(struct plex *plex,
    u_int64_t pstripe,
    int size,
    enum parityop op,
    struct rangelock **lockp,
    off_t * errorloc)
{
    int error;						    /* first I/O error seen, if any */
    int sdno;						    /* loop index over subdisk buffers */
    u_int64_t stripe;					    /* stripe number */
    int *parity_buf;					    /* buffer address for current parity block */
    int *newparity_buf;					    /* and for new parity block */
    int mysize;						    /* I/O transfer size for this transfer */
    int isize;						    /* mysize in ints */
    int i;
    int psd;						    /* parity subdisk number */
    int newpsd;						    /* and "subdisk number" of new parity */
    struct buf **bpp;					    /* pointers to our bps */
    struct buf *pbp;					    /* buffer header for parity stripe */
    int *sbuf;
    int bufcount;					    /* number of buffers we need */

    stripe = pstripe / plex->stripesize;		    /* stripe number */
    psd = plex->subdisks - 1 - stripe % plex->subdisks;	    /* parity subdisk for this stripe */
    parity_buf = NULL;					    /* to keep the compiler happy */
    error = 0;

    /*
     * It's possible that the default transfer size
     * we chose is not a factor of the stripe size.
     * We *must* limit this operation to a single
     * stripe, at least for RAID-5 rebuild, since
     * the parity subdisk changes between stripes,
     * so in this case we need to perform a short
     * transfer.  Set variable mysize to reflect
     * this.
     */
    mysize = min(size, (plex->stripesize * (stripe + 1) - pstripe) << DEV_BSHIFT);
    isize = mysize / (sizeof(int));			    /* number of ints in the buffer */
    bufcount = plex->subdisks + 1;			    /* sd buffers plus result buffer */
    newpsd = plex->subdisks;				    /* index of the result (new parity) buffer */
    /* NOTE(review): Malloc return is not checked here — presumably it sleeps; confirm. */
    bpp = (struct buf **) Malloc(bufcount * sizeof(struct buf *)); /* array of pointers to bps */

    /*
     * First, build requests for all subdisks.  When op is
     * rebuildparity we skip the old parity buffer (index psd)
     * entirely; it isn't needed to recompute parity.
     */
    for (sdno = 0; sdno < bufcount; sdno++) {		    /* for each subdisk */
	if ((sdno != psd) || (op != rebuildparity)) {
	    /* Get a buffer header and initialize it. */
	    crit_enter();
	    bpp[sdno] = geteblk(mysize);		    /* Get a buffer */
	    if (bpp[sdno] == NULL) {
		/*
		 * NOTE(review): this unwind releases every lower
		 * index, including psd, which may never have been
		 * allocated when op == rebuildparity.  Believed
		 * unreachable if geteblk cannot return NULL — confirm.
		 */
		while (sdno-- > 0) {			    /* release the ones we got */
		    bpp[sdno]->b_flags |= B_INVAL;
		    brelse(bpp[sdno]);			    /* give back our resources */
		}
		crit_exit();
		printf("vinum: can't allocate buffer space for parity op.\n");
		return NULL;				    /* no bpps */
	    }
	    crit_exit();
	    if (sdno == psd)
		parity_buf = (int *) bpp[sdno]->b_data;	    /* remember the old parity data */
	    if (sdno == newpsd)				    /* the new one? */
		bpp[sdno]->b_bio1.bio_driver_info = VINUM_SD(plex->sdnos[psd]); /* write back to the parity SD */
	    else
		bpp[sdno]->b_bio1.bio_driver_info = VINUM_SD(plex->sdnos[sdno]); /* device number */
	    bpp[sdno]->b_cmd = BUF_CMD_READ;		    /* either way, read it */
	    bpp[sdno]->b_flags = B_PAGING;
	    bpp[sdno]->b_bcount = mysize;
	    bpp[sdno]->b_resid = bpp[sdno]->b_bcount;
	    bpp[sdno]->b_bio1.bio_offset = (off_t)pstripe << DEV_BSHIFT; /* transfer from here (subdisk-relative) */
	}
    }

    /* Initialize result buffer: parity accumulates by XOR, so start at zero. */
    pbp = bpp[newpsd];
    newparity_buf = (int *) bpp[newpsd]->b_data;
    bzero(newparity_buf, mysize);

    /*
     * Now lock the stripe with the first non-parity
     * bp as locking bp.
     * NOTE(review): the offset expression here is in a
     * different unit from revive_block's lockrange call
     * (which passes a byte offset) — looks inconsistent;
     * confirm against lockrange's expectations.
     */
    *lockp = lockrange(pstripe * plex->stripesize * (plex->subdisks - 1),
	bpp[psd ? 0 : 1],
	plex);

    /*
     * Then issue requests for all subdisks in
     * parallel.  Don't transfer the parity stripe
     * if we're rebuilding parity, unless we also
     * want to check it.
     */
    for (sdno = 0; sdno < plex->subdisks; sdno++) {	    /* for each real subdisk */
	if ((sdno != psd) || (op != rebuildparity)) {
	    sdio(&bpp[sdno]->b_bio1);
	}
    }

    /*
     * Next, wait for the requests to complete.
     * We wait in the order in which they were
     * issued, which isn't necessarily the order in
     * which they complete, but we don't have a
     * convenient way of doing the latter, and the
     * delay is minimal.
     */
    for (sdno = 0; sdno < plex->subdisks; sdno++) {	    /* for each subdisk */
	if ((sdno != psd) || (op != rebuildparity)) {
	    biowait(bpp[sdno]);
	    if (bpp[sdno]->b_flags & B_ERROR)		    /* can't read, */
		error = bpp[sdno]->b_error;
	    else if (sdno != psd) {			    /* update parity */
		sbuf = (int *) bpp[sdno]->b_data;
		for (i = 0; i < isize; i++)
		    ((int *) newparity_buf)[i] ^= sbuf[i];  /* xor in the buffer */
	    }
	}
	/* Release every data buffer; keep the old parity (psd) and the result (pbp). */
	if (sdno != psd) {				    /* release all bps except parity */
	    bpp[sdno]->b_flags |= B_INVAL;
	    brelse(bpp[sdno]);				    /* give back our resources */
	}
    }

    /*
     * If we're checking, compare the calculated
     * and the read parity block.  If they're
     * different, return the plex-relative offset;
     * otherwise return -1.
     */
    if ((op == checkparity)
	|| (op == rebuildandcheckparity)) {
	*errorloc = -1;					    /* no error yet */
	for (i = 0; i < isize; i++) {
	    if (parity_buf[i] != newparity_buf[i]) {
		*errorloc = (off_t) (pstripe << DEV_BSHIFT) * (plex->subdisks - 1)
		    + i * sizeof(int);
		break;					    /* report only the first mismatch */
	    }
	}
	/* Done with the old parity block now. */
	bpp[psd]->b_flags |= B_INVAL;
	brelse(bpp[psd]);				    /* give back our resources */
    }
    /* release our resources */
    Free(bpp);
    /* Propagate any read error to the caller via the result bp. */
    if (error) {
	pbp->b_flags |= B_ERROR;
	pbp->b_error = error;
    }
    return pbp;
}

/*
 * Initialize a subdisk by writing zeroes to the
 * complete address space.  If verify is set,
 * check each transfer for correctness.
 *
 * Each call to this function writes (and maybe
 * checks) a single block.
 */
int
initsd(int sdno, int verify)
{
    struct sd *sd;
    struct plex *plex;
    struct volume *vol;					    /* NOTE(review): computed but never used below */
    struct buf *bp;
    int error;
    int size;						    /* size of init block, bytes */
    daddr_t plexblkno;					    /* lblkno in plex (NOTE(review): unused below) */
    int verified;					    /* set when we're happy with what we wrote */

    error = 0;
    plexblkno = 0;					    /* to keep the compiler happy */
    sd = &SD[sdno];
    if (sd->plexno < 0)					    /* no plex? */
	return EINVAL;
    plex = &PLEX[sd->plexno];				    /* point to plex */
    if (plex->volno >= 0)
	vol = &VOL[plex->volno];
    else
	vol = NULL;

    /* Choose or clamp the per-call transfer size. */
    if (sd->init_blocksize == 0) {
	if (plex->stripesize != 0)			    /* we're striped, don't init more than */
	    sd->init_blocksize = min(DEFAULT_REVIVE_BLOCKSIZE, /* one block at a time */
		plex->stripesize << DEV_BSHIFT);
	else
	    sd->init_blocksize = DEFAULT_REVIVE_BLOCKSIZE;
    } else if (sd->init_blocksize > MAX_REVIVE_BLOCKSIZE)
	sd->init_blocksize = MAX_REVIVE_BLOCKSIZE;

    /* Don't write past the end of the subdisk. */
    size = min(sd->init_blocksize >> DEV_BSHIFT, sd->sectors - sd->initialized) << DEV_BSHIFT;

    verified = 0;
    while (!verified) {					    /* until we're happy with it, */
	/* Write pass: one block of zeroes at the current position. */
	crit_enter();
	bp = geteblk(size);				    /* Get a buffer */
	crit_exit();
	if (bp == NULL)
	    return ENOMEM;

	bp->b_bcount = size;
	bp->b_resid = bp->b_bcount;
	bp->b_bio1.bio_offset = (off_t)sd->initialized << DEV_BSHIFT; /* write it to here */
	bp->b_bio1.bio_driver_info = VINUM_SD(sdno);
	bzero(bp->b_data, bp->b_bcount);
	bp->b_cmd = BUF_CMD_WRITE;
	sdio(&bp->b_bio1);				    /* perform the I/O */
	biowait(bp);
	if (bp->b_flags & B_ERROR)
	    error = bp->b_error;
	if (bp->b_qindex == 0) {			    /* not on a queue, */
	    bp->b_flags |= B_INVAL;
	    bp->b_flags &= ~B_ERROR;
	    brelse(bp);					    /* is this kosher? */
	}
	if ((error == 0) && verify) {			    /* check that it got there */
	    /* Read-back pass: re-read the block and confirm it is all zeroes. */
	    crit_enter();
	    bp = geteblk(size);				    /* get a buffer */
	    if (bp == NULL) {
		crit_exit();
		error = ENOMEM;
	    } else {
		bp->b_bcount = size;
		bp->b_resid = bp->b_bcount;
		bp->b_bio1.bio_offset = (off_t)sd->initialized << DEV_BSHIFT; /* read from here */
		bp->b_bio1.bio_driver_info = VINUM_SD(sdno);
		bp->b_cmd = BUF_CMD_READ;		    /* read it back */
		crit_exit();
		sdio(&bp->b_bio1);
		biowait(bp);
		/*
		 * XXX Bug fix code.  This is hopefully no
		 * longer needed (21 February 2000).
		 */
		if (bp->b_flags & B_ERROR)
		    error = bp->b_error;
		/*
		 * Zero check: first byte must be 0 and every byte
		 * must equal its neighbour (overlapping bcmp), so
		 * the whole buffer is zero iff both tests pass.
		 */
		else if ((*bp->b_data != 0)		    /* first word spammed */
		    ||(bcmp(bp->b_data, &bp->b_data[1], bp->b_bcount - 1))) { /* or one of the others */
		    printf("vinum: init error on %s, offset 0x%llx sectors\n",
			sd->name,
			(long long) sd->initialized);
		    verified = 0;			    /* loop and rewrite this block */
		} else
		    verified = 1;
		if (bp->b_qindex == 0) {		    /* not on a queue, */
		    bp->b_flags |= B_INVAL;
		    bp->b_flags &= ~B_ERROR;
		    brelse(bp);				    /* is this kosher? */
		}
	    }
	} else
	    verified = 1;				    /* no verify requested (or write failed) */
    }
    if (error == 0) {					    /* did it, */
	sd->initialized += size >> DEV_BSHIFT;		    /* moved this much further down */
	if (sd->initialized >= sd->sectors) {		    /* finished */
	    sd->initialized = 0;
	    set_sd_state(sdno, sd_initialized, setstate_force); /* bring the sd up */
	    log(LOG_INFO, "vinum: %s is %s\n", sd->name, sd_state(sd->state));
	    save_config();				    /* and save the updated configuration */
	} else						    /* more to go, */
	    error = EAGAIN;				    /* ya'll come back, see? */
    }
    return error;
}

/* Local Variables: */
/* fill-column: 50 */
/* End: */