/*	$NetBSD: rf_parityloggingdags.c,v 1.8 2002/08/02 03:42:34 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: William V. Courtright II
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * DAGs specific to parity logging are created here.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_parityloggingdags.c,v 1.8 2002/08/02 03:42:34 oster Exp $");

#include "rf_archs.h"

#if RF_INCLUDE_PARITYLOGGING > 0

#include <dev/raidframe/raidframevar.h>

#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_dagutils.h"
#include "rf_dagfuncs.h"
#include "rf_debugMem.h"
#include "rf_paritylog.h"
#include "rf_general.h"

#include "rf_parityloggingdags.h"

/******************************************************************************
 *
 * creates a DAG to perform a large-write operation:
 *
 *           / Rod \             / Wnd \
 * H -- NIL- Rod - NIL - Wnd ------ NIL - T
 *           \ Rod /             \ Xor - Lpo /
 *
 * The writes are not done until the reads complete because if they were
 * done in parallel, a failure on one of the reads could leave the parity
 * in an inconsistent state, so that the retry with a new DAG would produce
 * erroneous parity.
 *
 * Note: this DAG has the nasty property that none of the buffers allocated
 * for reading old data can be freed until the XOR node fires.  Need to fix
 * this.
 *
 * The last two arguments are the number of faults tolerated and the
 * function for the redundancy calculation.  The undo for the redundancy
 * calculation is assumed to be null.
 *
 *****************************************************************************/
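
/*
 * Node legend for the diagram above: H is the DAG header, NIL nodes are
 * no-op synchronization points, Rod reads old data, Wnd writes new data,
 * Xor computes the new redundancy with the supplied function, Lpo
 * overwrites the parity unit in the parity log (rf_ParityLogOverwriteFunc),
 * and T is the terminator.
 */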

void
rf_CommonCreateParityLoggingLargeWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    int nfaults,
    int (*redFunc) (RF_DagNode_t *))
{
	RF_DagNode_t *nodes, *wndNodes, *rodNodes = NULL, *syncNode, *xorNode,
	    *lpoNode, *blockNode, *unblockNode, *termNode;
	int nWndNodes, nRodNodes, i;
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_AccessStripeMapHeader_t *new_asm_h[2];
	int nodeNum, asmNum;
	RF_ReconUnitNum_t which_ru;
	char *sosBuffer, *eosBuffer;
	RF_PhysDiskAddr_t *pda;
	RF_StripeNum_t parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout),
	    asmap->raidAddress, &which_ru);

	if (rf_dagDebug)
		printf("[Creating parity-logging large-write DAG]\n");
	RF_ASSERT(nfaults == 1);	/* this arch is only single-fault tolerant */
	dag_h->creator = "ParityLoggingLargeWriteDAG";

	/* alloc the Wnd nodes, the xor node, and the Lpo node */
	nWndNodes = asmap->numStripeUnitsAccessed;
	RF_CallocAndAdd(nodes, nWndNodes + 6, sizeof(RF_DagNode_t),
	    (RF_DagNode_t *), allocList);
	i = 0;
	wndNodes = &nodes[i];
	i += nWndNodes;
	xorNode = &nodes[i];
	i += 1;
	lpoNode = &nodes[i];
	i += 1;
	blockNode = &nodes[i];
	i += 1;
	syncNode = &nodes[i];
	i += 1;
	unblockNode = &nodes[i];
	i += 1;
	termNode = &nodes[i];
	i += 1;

	dag_h->numCommitNodes = nWndNodes + 1;
	dag_h->numCommits = 0;
	dag_h->numSuccedents = 1;

	rf_MapUnaccessedPortionOfStripe(raidPtr, layoutPtr, asmap, dag_h,
	    new_asm_h, &nRodNodes, &sosBuffer, &eosBuffer, allocList);
	if (nRodNodes > 0)
		RF_CallocAndAdd(rodNodes, nRodNodes, sizeof(RF_DagNode_t),
		    (RF_DagNode_t *), allocList);

	/* begin node initialization */
	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
	    NULL, nRodNodes + 1, 0, 0, 0, dag_h, "Nil", allocList);
	rf_InitNode(unblockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
	    NULL, 1, nWndNodes + 1, 0, 0, dag_h, "Nil", allocList);
	rf_InitNode(syncNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
	    NULL, nWndNodes + 1, nRodNodes + 1, 0, 0, dag_h, "Nil", allocList);
	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc,
	    NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);

	/* initialize the Rod nodes */
	for (nodeNum = asmNum = 0; asmNum < 2; asmNum++) {
		if (new_asm_h[asmNum]) {
			pda = new_asm_h[asmNum]->stripeMap->physInfo;
			while (pda) {
				rf_InitNode(&rodNodes[nodeNum], rf_wait, RF_FALSE,
				    rf_DiskReadFunc, rf_DiskReadUndoFunc,
				    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h,
				    "Rod", allocList);
				rodNodes[nodeNum].params[0].p = pda;
				rodNodes[nodeNum].params[1].p = pda->bufPtr;
				rodNodes[nodeNum].params[2].v = parityStripeID;
				rodNodes[nodeNum].params[3].v =
				    RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
				nodeNum++;
				pda = pda->next;
			}
		}
	}
	RF_ASSERT(nodeNum == nRodNodes);

	/* initialize the Wnd nodes */
	pda = asmap->physInfo;
	for (i = 0; i < nWndNodes; i++) {
		rf_InitNode(&wndNodes[i], rf_wait, RF_TRUE, rf_DiskWriteFunc,
		    rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0,
		    dag_h, "Wnd", allocList);
		RF_ASSERT(pda != NULL);
		wndNodes[i].params[0].p = pda;
		wndNodes[i].params[1].p = pda->bufPtr;
		wndNodes[i].params[2].v = parityStripeID;
		wndNodes[i].params[3].v =
		    RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		pda = pda->next;
	}

	/* initialize the redundancy node */
	rf_InitNode(xorNode, rf_wait, RF_TRUE, redFunc, rf_NullNodeUndoFunc, NULL,
	    1, 1, 2 * (nWndNodes + nRodNodes) + 1, 1, dag_h, "Xr ", allocList);
	xorNode->flags |= RF_DAGNODE_FLAG_YIELD;
	for (i = 0; i < nWndNodes; i++) {
		xorNode->params[2 * i + 0] = wndNodes[i].params[0];	/* pda */
		xorNode->params[2 * i + 1] = wndNodes[i].params[1];	/* buf ptr */
	}
	for (i = 0; i < nRodNodes; i++) {
		xorNode->params[2 * (nWndNodes + i) + 0] = rodNodes[i].params[0];	/* pda */
		xorNode->params[2 * (nWndNodes + i) + 1] = rodNodes[i].params[1];	/* buf ptr */
	}
	/* xor node needs to get at RAID information */
	xorNode->params[2 * (nWndNodes + nRodNodes)].p = raidPtr;
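
	/*
	 * Layout of xorNode->params as filled in above: entries
	 * 0 .. 2*nWndNodes-1 hold {pda, buf} pairs from the Wnd nodes,
	 * entries 2*nWndNodes .. 2*(nWndNodes+nRodNodes)-1 hold {pda, buf}
	 * pairs from the Rod nodes, and the final entry holds raidPtr,
	 * matching the 2 * (nWndNodes + nRodNodes) + 1 parameter count
	 * passed to rf_InitNode above.
	 */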

	/*
	 * Look for an Rod node that reads a complete SU.  If none, alloc a
	 * buffer to receive the parity info.  Note that we can't use a new
	 * data buffer because it will not have gotten written when the xor
	 * occurs.
	 */
	for (i = 0; i < nRodNodes; i++)
		if (((RF_PhysDiskAddr_t *) rodNodes[i].params[0].p)->numSector ==
		    raidPtr->Layout.sectorsPerStripeUnit)
			break;
	if (i == nRodNodes) {
		RF_CallocAndAdd(xorNode->results[0], 1,
		    rf_RaidAddressToByte(raidPtr, raidPtr->Layout.sectorsPerStripeUnit),
		    (void *), allocList);
	} else {
		xorNode->results[0] = rodNodes[i].params[1].p;
	}
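
	/*
	 * Either way, xorNode->results[0] now refers to a buffer spanning a
	 * full stripe unit: the Lpo node below logs the entire parity unit
	 * (asmap->parityInfo is asserted to describe it completely), so a
	 * partial-SU buffer would not do.
	 */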

	/* initialize the Lpo node */
	rf_InitNode(lpoNode, rf_wait, RF_FALSE, rf_ParityLogOverwriteFunc,
	    rf_ParityLogOverwriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0,
	    dag_h, "Lpo", allocList);

	lpoNode->params[0].p = asmap->parityInfo;
	lpoNode->params[1].p = xorNode->results[0];
	RF_ASSERT(asmap->parityInfo->next == NULL);	/* parityInfo must describe
							 * entire parity unit */

	/* connect nodes to form graph */

	/* connect dag header to block node */
	RF_ASSERT(dag_h->numSuccedents == 1);
	RF_ASSERT(blockNode->numAntecedents == 0);
	dag_h->succedents[0] = blockNode;

	/* connect the block node to the Rod nodes */
	RF_ASSERT(blockNode->numSuccedents == nRodNodes + 1);
	for (i = 0; i < nRodNodes; i++) {
		RF_ASSERT(rodNodes[i].numAntecedents == 1);
		blockNode->succedents[i] = &rodNodes[i];
		rodNodes[i].antecedents[0] = blockNode;
		rodNodes[i].antType[0] = rf_control;
	}

	/* connect the block node to the sync node */
	/* necessary if nRodNodes == 0 */
	RF_ASSERT(syncNode->numAntecedents == nRodNodes + 1);
	blockNode->succedents[nRodNodes] = syncNode;
	syncNode->antecedents[0] = blockNode;
	syncNode->antType[0] = rf_control;

	/* connect the Rod nodes to the sync node */
	for (i = 0; i < nRodNodes; i++) {
		rodNodes[i].succedents[0] = syncNode;
		syncNode->antecedents[1 + i] = &rodNodes[i];
		syncNode->antType[1 + i] = rf_control;
	}

	/* connect the sync node to the xor node */
	RF_ASSERT(syncNode->numSuccedents == nWndNodes + 1);
	RF_ASSERT(xorNode->numAntecedents == 1);
	syncNode->succedents[0] = xorNode;
	xorNode->antecedents[0] = syncNode;
	xorNode->antType[0] = rf_trueData;	/* carry forward from sync */

	/* connect the sync node to the Wnd nodes */
	for (i = 0; i < nWndNodes; i++) {
		RF_ASSERT(wndNodes[i].numAntecedents == 1);
		syncNode->succedents[1 + i] = &wndNodes[i];
		wndNodes[i].antecedents[0] = syncNode;
		wndNodes[i].antType[0] = rf_control;
	}

	/* connect the xor node to the Lpo node */
	RF_ASSERT(xorNode->numSuccedents == 1);
	RF_ASSERT(lpoNode->numAntecedents == 1);
	xorNode->succedents[0] = lpoNode;
	lpoNode->antecedents[0] = xorNode;
	lpoNode->antType[0] = rf_trueData;

	/* connect the Wnd nodes to the unblock node */
	RF_ASSERT(unblockNode->numAntecedents == nWndNodes + 1);
	for (i = 0; i < nWndNodes; i++) {
		RF_ASSERT(wndNodes[i].numSuccedents == 1);
		wndNodes[i].succedents[0] = unblockNode;
		unblockNode->antecedents[i] = &wndNodes[i];
		unblockNode->antType[i] = rf_control;
	}

	/* connect the Lpo node to the unblock node */
	RF_ASSERT(lpoNode->numSuccedents == 1);
	lpoNode->succedents[0] = unblockNode;
	unblockNode->antecedents[nWndNodes] = lpoNode;
	unblockNode->antType[nWndNodes] = rf_control;

	/* connect unblock node to terminator */
	RF_ASSERT(unblockNode->numSuccedents == 1);
	RF_ASSERT(termNode->numAntecedents == 1);
	RF_ASSERT(termNode->numSuccedents == 0);
	unblockNode->succedents[0] = termNode;
	termNode->antecedents[0] = unblockNode;
	termNode->antType[0] = rf_control;
}



/******************************************************************************
 *
 * creates a DAG to perform a small-write operation (either raid 5 or pq),
 * which is as follows:
 *
 *                        Header
 *                          |
 *                        Block
 *                      / |  ... \   \
 *                     /  |       \   \
 *                   Rod  Rod     Rod  Rop
 *                   | \ /| \    / |   \/ |
 *                   |  | |  \  /  |   /\ |
 *                  Wnd  Wnd  Wnd      X  |
 *                   |    \     /      |  |
 *                   |     \   /       |  |
 *                    \     \ /       Lpo  |
 *                     \     \        /   /
 *                      +-> Unblock <-+--+
 *                          |
 *                          T
 *
 * R = Read, W = Write, X = Xor, o = old, n = new, d = data, p = parity.
 * When the access spans a stripe unit boundary and is less than one SU in
 * size, there will be two Rop -- X -- Wnp branches.  I call this the
 * "double-XOR" case.
 * The second output from each Rod node goes to the X node.  In the
 * double-XOR case, there are exactly 2 Rod nodes, and each sends one output
 * to one X node.
 * There is one Rod -- Wnd -- T branch for each stripe unit being updated.
 *
 * The block and unblock nodes are unused.  See comment above
 * CreateFaultFreeReadDAG.
 *
 * Note: this DAG ignores all the optimizations related to making the RMWs
 * atomic.  It also has the nasty property that none of the buffers allocated
 * for reading old data & parity can be freed until the XOR node fires.
 * Need to fix this.
 *
 * A null qfuncs indicates single fault tolerant.
 *****************************************************************************/
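
/*
 * In this parity-logging variant the new parity is not written in place:
 * the output of each X node feeds a parity log update (Lpu) node rather
 * than a Wnp node, as built in rf_CommonCreateParityLoggingSmallWriteDAG
 * below.
 */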

void
rf_CommonCreateParityLoggingSmallWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    RF_RedFuncs_t * pfuncs,
    RF_RedFuncs_t * qfuncs)
{
	RF_DagNode_t *xorNodes, *blockNode, *unblockNode, *nodes;
	RF_DagNode_t *readDataNodes, *readParityNodes;
	RF_DagNode_t *writeDataNodes, *lpuNodes;
	RF_DagNode_t *unlockDataNodes = NULL, *termNode;
	RF_PhysDiskAddr_t *pda = asmap->physInfo;
	int numDataNodes = asmap->numStripeUnitsAccessed;
	int numParityNodes = (asmap->parityInfo->next) ? 2 : 1;
	int i, j, nNodes, totalNumNodes;
	RF_ReconUnitNum_t which_ru;
	int (*func) (RF_DagNode_t * node), (*undoFunc) (RF_DagNode_t * node);
	int (*qfunc) (RF_DagNode_t * node);
	char *name, *qname;
	RF_StripeNum_t parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout),
	    asmap->raidAddress, &which_ru);
#ifdef RAID_DIAGNOSTIC
	long nfaults = qfuncs ? 2 : 1;
#endif				/* RAID_DIAGNOSTIC */
	int lu_flag = (rf_enableAtomicRMW) ? 1 : 0;	/* lock/unlock flag */

	if (rf_dagDebug)
		printf("[Creating parity-logging small-write DAG]\n");
	RF_ASSERT(numDataNodes > 0);
	RF_ASSERT(nfaults == 1);
	dag_h->creator = "ParityLoggingSmallWriteDAG";

	/*
	 * DAG creation occurs in four steps:
	 * 1. count the number of nodes in the DAG
	 * 2. create the nodes
	 * 3. initialize the nodes
	 * 4. connect the nodes
	 */

	/* Step 1. compute number of nodes in the graph */

	/*
	 * Number of nodes:
	 *   a read and a write for each data unit,
	 *   a redundancy computation node for each parity unit,
	 *   a read and an Lpu for each parity unit,
	 *   a block and unblock node (2),
	 *   a terminator node,
	 *   and, if atomic RMW is enabled, an unlock node for each data unit.
	 */
	totalNumNodes = (2 * numDataNodes) + numParityNodes + (2 * numParityNodes) + 3;
	if (lu_flag)
		totalNumNodes += numDataNodes;
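
	/*
	 * For example, a (hypothetical) small write touching two data units
	 * within a single parity range gives numDataNodes = 2 and
	 * numParityNodes = 1, so totalNumNodes = 4 + 1 + 2 + 3 = 10, plus 2
	 * unlock nodes when atomic RMW locking is enabled.
	 */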

	nNodes = numDataNodes + numParityNodes;

	dag_h->numCommitNodes = numDataNodes + numParityNodes;
	dag_h->numCommits = 0;
	dag_h->numSuccedents = 1;

	/* Step 2. create the nodes */
	RF_CallocAndAdd(nodes, totalNumNodes, sizeof(RF_DagNode_t),
	    (RF_DagNode_t *), allocList);
	i = 0;
	blockNode = &nodes[i];
	i += 1;
	unblockNode = &nodes[i];
	i += 1;
	readDataNodes = &nodes[i];
	i += numDataNodes;
	readParityNodes = &nodes[i];
	i += numParityNodes;
	writeDataNodes = &nodes[i];
	i += numDataNodes;
	lpuNodes = &nodes[i];
	i += numParityNodes;
	xorNodes = &nodes[i];
	i += numParityNodes;
	termNode = &nodes[i];
	i += 1;
	if (lu_flag) {
		unlockDataNodes = &nodes[i];
		i += numDataNodes;
	}
	RF_ASSERT(i == totalNumNodes);
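
	/*
	 * Note on the carve-up above: readParityNodes immediately follow
	 * readDataNodes in the nodes array, so readDataNodes[numDataNodes]
	 * is the same node as readParityNodes[0].  The single-XOR parameter
	 * setup in Step 3 relies on this adjacency when it walks
	 * readDataNodes[i] for i = 0 .. numDataNodes.
	 */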

	/* Step 3. initialize the nodes */
	/* initialize block node (Nil) */
	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
	    NULL, nNodes, 0, 0, 0, dag_h, "Nil", allocList);

	/* initialize unblock node (Nil) */
	rf_InitNode(unblockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
	    NULL, 1, nNodes, 0, 0, dag_h, "Nil", allocList);

	/* initialize terminator node (Trm) */
	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc,
	    NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);

	/* initialize nodes which read old data (Rod) */
	for (i = 0; i < numDataNodes; i++) {
		rf_InitNode(&readDataNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc,
		    rf_DiskReadUndoFunc, rf_GenericWakeupFunc, nNodes, 1, 4, 0,
		    dag_h, "Rod", allocList);
		RF_ASSERT(pda != NULL);
		readDataNodes[i].params[0].p = pda;	/* physical disk addr desc */
		readDataNodes[i].params[1].p = rf_AllocBuffer(raidPtr, dag_h, pda,
		    allocList);				/* buffer to hold old data */
		readDataNodes[i].params[2].v = parityStripeID;
		readDataNodes[i].params[3].v =
		    RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, lu_flag, 0, which_ru);
		pda = pda->next;
		readDataNodes[i].propList[0] = NULL;
		readDataNodes[i].propList[1] = NULL;
	}

	/* initialize nodes which read old parity (Rop) */
	pda = asmap->parityInfo;
	i = 0;
	for (i = 0; i < numParityNodes; i++) {
		RF_ASSERT(pda != NULL);
		rf_InitNode(&readParityNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc,
		    rf_DiskReadUndoFunc, rf_GenericWakeupFunc, nNodes, 1, 4, 0,
		    dag_h, "Rop", allocList);
		readParityNodes[i].params[0].p = pda;
		readParityNodes[i].params[1].p = rf_AllocBuffer(raidPtr, dag_h, pda,
		    allocList);				/* buffer to hold old parity */
		readParityNodes[i].params[2].v = parityStripeID;
		readParityNodes[i].params[3].v =
		    RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		readParityNodes[i].propList[0] = NULL;
		pda = pda->next;
	}

	/* initialize nodes which write new data (Wnd) */
	pda = asmap->physInfo;
	for (i = 0; i < numDataNodes; i++) {
		RF_ASSERT(pda != NULL);
		rf_InitNode(&writeDataNodes[i], rf_wait, RF_TRUE, rf_DiskWriteFunc,
		    rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, nNodes, 4, 0,
		    dag_h, "Wnd", allocList);
		writeDataNodes[i].params[0].p = pda;		/* physical disk addr desc */
		writeDataNodes[i].params[1].p = pda->bufPtr;	/* buffer holding new data
								 * to be written */
		writeDataNodes[i].params[2].v = parityStripeID;
		writeDataNodes[i].params[3].v =
		    RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);

		if (lu_flag) {
			/* initialize node to unlock the disk queue */
			rf_InitNode(&unlockDataNodes[i], rf_wait, RF_FALSE, rf_DiskUnlockFunc,
			    rf_DiskUnlockUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0,
			    dag_h, "Und", allocList);
			unlockDataNodes[i].params[0].p = pda;	/* physical disk addr desc */
			unlockDataNodes[i].params[1].v =
			    RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, lu_flag, which_ru);
		}
		pda = pda->next;
	}


	/* initialize nodes which compute new parity */
	/*
	 * We use the simple XOR func in the double-XOR case, and when we're
	 * accessing only a portion of one stripe unit.  The distinction
	 * between the two is that the regular XOR func assumes that the
	 * targbuf is a full SU in size, and examines the pda associated with
	 * the buffer to decide where within the buffer to XOR the data,
	 * whereas the simple XOR func just XORs the data into the start of
	 * the buffer.
	 */
	if ((numParityNodes == 2) || ((numDataNodes == 1) &&
	    (asmap->totalSectorsAccessed < raidPtr->Layout.sectorsPerStripeUnit))) {
		func = pfuncs->simple;
		undoFunc = rf_NullNodeUndoFunc;
		name = pfuncs->SimpleName;
		if (qfuncs) {
			qfunc = qfuncs->simple;
			qname = qfuncs->SimpleName;
		}
	} else {
		func = pfuncs->regular;
		undoFunc = rf_NullNodeUndoFunc;
		name = pfuncs->RegularName;
		if (qfuncs) {
			qfunc = qfuncs->regular;
			qname = qfuncs->RegularName;
		}
	}
	/*
	 * Initialize the xor nodes: params are {pda,buf} from {Rod,Wnd,Rop}
	 * nodes, and raidPtr.
	 */
	if (numParityNodes == 2) {	/* double-xor case */
		for (i = 0; i < numParityNodes; i++) {
			/* no wakeup func for xor */
			rf_InitNode(&xorNodes[i], rf_wait, RF_TRUE, func, undoFunc,
			    NULL, 1, nNodes, 7, 1, dag_h, name, allocList);
			xorNodes[i].flags |= RF_DAGNODE_FLAG_YIELD;
			xorNodes[i].params[0] = readDataNodes[i].params[0];
			xorNodes[i].params[1] = readDataNodes[i].params[1];
			xorNodes[i].params[2] = readParityNodes[i].params[0];
			xorNodes[i].params[3] = readParityNodes[i].params[1];
			xorNodes[i].params[4] = writeDataNodes[i].params[0];
			xorNodes[i].params[5] = writeDataNodes[i].params[1];
			xorNodes[i].params[6].p = raidPtr;
			/* use old parity buf as target buf */
			xorNodes[i].results[0] = readParityNodes[i].params[1].p;
		}
	} else {
		/* there is only one xor node in this case */
		rf_InitNode(&xorNodes[0], rf_wait, RF_TRUE, func, undoFunc, NULL, 1,
		    nNodes, (2 * (numDataNodes + numDataNodes + 1) + 1), 1, dag_h,
		    name, allocList);
		xorNodes[0].flags |= RF_DAGNODE_FLAG_YIELD;
		for (i = 0; i < numDataNodes + 1; i++) {
			/* set up params related to Rod and Rop nodes */
			xorNodes[0].params[2 * i + 0] = readDataNodes[i].params[0];	/* pda */
			xorNodes[0].params[2 * i + 1] = readDataNodes[i].params[1];	/* buffer pointer */
		}
		for (i = 0; i < numDataNodes; i++) {
			/* set up params related to Wnd and Wnp nodes */
			xorNodes[0].params[2 * (numDataNodes + 1 + i) + 0] =
			    writeDataNodes[i].params[0];	/* pda */
			xorNodes[0].params[2 * (numDataNodes + 1 + i) + 1] =
			    writeDataNodes[i].params[1];	/* buffer pointer */
		}
		/* xor node needs to get at RAID information */
		xorNodes[0].params[2 * (numDataNodes + numDataNodes + 1)].p = raidPtr;
		xorNodes[0].results[0] = readParityNodes[0].params[1].p;
	}
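
	/*
	 * In the single-XOR case the parameter count passed to rf_InitNode,
	 * 2 * (numDataNodes + numDataNodes + 1) + 1, is exactly the
	 * (numDataNodes + 1) read {pda,buf} pairs (Rod plus one Rop) plus
	 * the numDataNodes write {pda,buf} pairs plus the trailing raidPtr
	 * entry filled in above.
	 */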

	/* initialize the log node(s) */
	pda = asmap->parityInfo;
	for (i = 0; i < numParityNodes; i++) {
		RF_ASSERT(pda);
		rf_InitNode(&lpuNodes[i], rf_wait, RF_FALSE, rf_ParityLogUpdateFunc,
		    rf_ParityLogUpdateUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0,
		    dag_h, "Lpu", allocList);
		lpuNodes[i].params[0].p = pda;	/* PhysDiskAddr of parity */
		lpuNodes[i].params[1].p = xorNodes[i].results[0];	/* buffer pointer to
									 * parity */
		pda = pda->next;
	}


	/* Step 4. connect the nodes */

	/* connect header to block node */
	RF_ASSERT(dag_h->numSuccedents == 1);
	RF_ASSERT(blockNode->numAntecedents == 0);
	dag_h->succedents[0] = blockNode;

	/* connect block node to read old data nodes */
	RF_ASSERT(blockNode->numSuccedents == (numDataNodes + numParityNodes));
	for (i = 0; i < numDataNodes; i++) {
		blockNode->succedents[i] = &readDataNodes[i];
		RF_ASSERT(readDataNodes[i].numAntecedents == 1);
		readDataNodes[i].antecedents[0] = blockNode;
		readDataNodes[i].antType[0] = rf_control;
	}

	/* connect block node to read old parity nodes */
	for (i = 0; i < numParityNodes; i++) {
		blockNode->succedents[numDataNodes + i] = &readParityNodes[i];
		RF_ASSERT(readParityNodes[i].numAntecedents == 1);
		readParityNodes[i].antecedents[0] = blockNode;
		readParityNodes[i].antType[0] = rf_control;
	}

	/* connect read old data nodes to write new data nodes */
	for (i = 0; i < numDataNodes; i++) {
		RF_ASSERT(readDataNodes[i].numSuccedents == numDataNodes + numParityNodes);
		for (j = 0; j < numDataNodes; j++) {
			RF_ASSERT(writeDataNodes[j].numAntecedents ==
			    numDataNodes + numParityNodes);
			readDataNodes[i].succedents[j] = &writeDataNodes[j];
			writeDataNodes[j].antecedents[i] = &readDataNodes[i];
			if (i == j)
				writeDataNodes[j].antType[i] = rf_antiData;
			else
				writeDataNodes[j].antType[i] = rf_control;
		}
	}
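
	/*
	 * When i == j the write targets the same stripe unit that the read
	 * just fetched, so the arc is marked rf_antiData (a read-before-write
	 * anti dependence); all other Rod -> Wnd arcs are plain control
	 * dependences.
	 */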

	/* connect read old data nodes to xor nodes */
	for (i = 0; i < numDataNodes; i++)
		for (j = 0; j < numParityNodes; j++) {
			RF_ASSERT(xorNodes[j].numAntecedents ==
			    numDataNodes + numParityNodes);
			readDataNodes[i].succedents[numDataNodes + j] = &xorNodes[j];
			xorNodes[j].antecedents[i] = &readDataNodes[i];
			xorNodes[j].antType[i] = rf_trueData;
		}

	/* connect read old parity nodes to write new data nodes */
	for (i = 0; i < numParityNodes; i++) {
		RF_ASSERT(readParityNodes[i].numSuccedents ==
		    numDataNodes + numParityNodes);
		for (j = 0; j < numDataNodes; j++) {
			readParityNodes[i].succedents[j] = &writeDataNodes[j];
			writeDataNodes[j].antecedents[numDataNodes + i] = &readParityNodes[i];
			writeDataNodes[j].antType[numDataNodes + i] = rf_control;
		}
	}

	/* connect read old parity nodes to xor nodes */
	for (i = 0; i < numParityNodes; i++)
		for (j = 0; j < numParityNodes; j++) {
			readParityNodes[i].succedents[numDataNodes + j] = &xorNodes[j];
			xorNodes[j].antecedents[numDataNodes + i] = &readParityNodes[i];
			xorNodes[j].antType[numDataNodes + i] = rf_trueData;
		}

	/* connect xor nodes to parity log update (Lpu) nodes */
	for (i = 0; i < numParityNodes; i++) {
		RF_ASSERT(xorNodes[i].numSuccedents == 1);
		RF_ASSERT(lpuNodes[i].numAntecedents == 1);
		xorNodes[i].succedents[0] = &lpuNodes[i];
		lpuNodes[i].antecedents[0] = &xorNodes[i];
		lpuNodes[i].antType[0] = rf_trueData;
	}

	for (i = 0; i < numDataNodes; i++) {
		if (lu_flag) {
			/* connect write new data nodes to unlock nodes */
			RF_ASSERT(writeDataNodes[i].numSuccedents == 1);
			RF_ASSERT(unlockDataNodes[i].numAntecedents == 1);
			writeDataNodes[i].succedents[0] = &unlockDataNodes[i];
			unlockDataNodes[i].antecedents[0] = &writeDataNodes[i];
			unlockDataNodes[i].antType[0] = rf_control;

			/* connect unlock nodes to unblock node */
			RF_ASSERT(unlockDataNodes[i].numSuccedents == 1);
			RF_ASSERT(unblockNode->numAntecedents ==
			    (numDataNodes + (nfaults * numParityNodes)));
			unlockDataNodes[i].succedents[0] = unblockNode;
			unblockNode->antecedents[i] = &unlockDataNodes[i];
			unblockNode->antType[i] = rf_control;
		} else {
			/* connect write new data nodes to unblock node */
			RF_ASSERT(writeDataNodes[i].numSuccedents == 1);
			RF_ASSERT(unblockNode->numAntecedents ==
			    (numDataNodes + (nfaults * numParityNodes)));
			writeDataNodes[i].succedents[0] = unblockNode;
			unblockNode->antecedents[i] = &writeDataNodes[i];
			unblockNode->antType[i] = rf_control;
		}
	}

	/* connect parity log update (Lpu) nodes to unblock node */
	for (i = 0; i < numParityNodes; i++) {
		RF_ASSERT(lpuNodes[i].numSuccedents == 1);
		lpuNodes[i].succedents[0] = unblockNode;
		unblockNode->antecedents[numDataNodes + i] = &lpuNodes[i];
		unblockNode->antType[numDataNodes + i] = rf_control;
	}

	/* connect unblock node to terminator */
	RF_ASSERT(unblockNode->numSuccedents == 1);
	RF_ASSERT(termNode->numAntecedents == 1);
	RF_ASSERT(termNode->numSuccedents == 0);
	unblockNode->succedents[0] = termNode;
	termNode->antecedents[0] = unblockNode;
	termNode->antType[0] = rf_control;
}


void
rf_CreateParityLoggingSmallWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    RF_RedFuncs_t * pfuncs,
    RF_RedFuncs_t * qfuncs)
{
	dag_h->creator = "ParityLoggingSmallWriteDAG";
	rf_CommonCreateParityLoggingSmallWriteDAG(raidPtr, asmap, dag_h, bp,
	    flags, allocList, &rf_xorFuncs, NULL);
}


void
rf_CreateParityLoggingLargeWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    int nfaults,
    int (*redFunc) (RF_DagNode_t *))
{
	dag_h->creator = "ParityLoggingLargeWriteDAG";
	rf_CommonCreateParityLoggingLargeWriteDAG(raidPtr, asmap, dag_h, bp,
	    flags, allocList, 1, rf_RegularXorFunc);
}
#endif				/* RF_INCLUDE_PARITYLOGGING > 0 */