/* File cuprstab.c.  Contains function uPartnStabilizer, the main function for
   a program that may be used to compute the stabilizer in a permutation group
   of an unordered partition.  Also contains functions as follows:

      uPartnImage:                Find an element of a permutation group mapping
                                  one unordered partition to another.
      initializeUPartnStabRefine: Initialize the unordered-partition stabilizer
                                  refinement functions.
      uPartnStabRefine:           A refinement family based on the unordered
                                  partition stabilizer property.
      partnStabRefine:            A refinement family acting on the auxiliary
                                  (cell) partition stack.  */
9
10 #include <stddef.h>
11 #include <stdlib.h>
12
13 #include "group.h"
14
15 #include "compcrep.h"
16 #include "compsg.h"
17 #include "errmesg.h"
18 #include "orbrefn.h"
19
20 CHECK( cuprst)
21
22 extern GroupOptions options;
23
24 static RefinementMapping uPartnStabRefine;
25 static ReducChkFn isUPartnStabReducible;
26 static void initializeUPartnStabRefn( Unsigned xDegree);
27
28 static Partition *knownIrreducible[10] /* Null terminated 0-base list of */
29 = {NULL}; /* point sets Lambda for which top */
30 /* partition on UpsilonStack is */
31 /* known to be SSS_Lambda irred. */
32
33 /* COPIED FROM ORBREFN */
34 #define HASH( i, j) ( (7L * i + j) % hashTableSize )
35
36 typedef struct RefnListEntry {
37 Unsigned i; /* Cell number in UpsilonTop to split. */
38 Unsigned j; /* Orbit number of G^(level) in split. */
39 Unsigned newCellSize; /* Size of new cell created by split. */
40 struct RefnListEntry *hashLink; /* List of refns with same hash value. */
41 struct RefnListEntry *next; /* Next refn on list of all refinements. */
42 struct RefnListEntry *last; /* Last refn on list of all refinements. */
43 } RefnListEntry;
44
45 static struct {
46 Unsigned groupCount;
47 PermGroup *group[10];
48 RefnListEntry **hashTable[10];
49 RefnListEntry *refnList[10];
50 Unsigned oldLevel[10];
51 RefnListEntry *freeListHeader[10];
52 RefnListEntry *inUseListHeader[10];
53 } refnData = {0};
54
55 static Unsigned trueGroupCount, hashTableSize;
56 static RefnListEntry **hashTable, *freeListHeader, *inUseListHeader;
57 /* END COPIED CODE. */
58
59 static Unsigned currentZDepth[10];
60
61 static Unsigned *totalSize;
62
63
64 /*-------------------------- uPartnStabilizer ------------------------------*/
65
66 /* Function uPartnStabilizer. Returns a new permutation group representing the
67 stabilizer in a permutation group G of an unordered partition Lambda of the
68 point set Omega. The algorithm is based on Figure 9 in the paper
69 "Permutation group algorithms based on partitions" by the author. */
70
71 #define familyParm familyParm_L
72
uPartnStabilizer(PermGroup * const G,const Partition * const Lambda,PermGroup * const L)73 PermGroup *uPartnStabilizer(
74 PermGroup *const G, /* The containing permutation group. */
75 const Partition *const Lambda, /* The point set to be stabilized. */
76 PermGroup *const L) /* A (possibly trivial) known subgroup of the
77 stabilizer in G of Lambda. (A null pointer
78 designates a trivial group.) */
79 {
80 #ifdef xxxx
81 RefinementFamily OOO_G, SSS_Lambda;
82 RefinementFamily *refnFamList[3];
83 ReducChkFn *reducChkList[3];
84 SpecialRefinementDescriptor *specialRefinement[3];
85 ExtraDomain* extra[2];
86
87 OOO_G.refine = orbRefine;
88 OOO_G.familyParm[0].ptrParm = G;
89
90 SSS_Lambda.refine = uPartnStabRefine;
91 SSS_Lambda.familyParm[0].ptrParm = (void *) Lambda;
92
93 refnFamList[0] = &OOO_G;
94 refnFamList[1] = &SSS_Lambda;
95 refnFamList[2] = NULL;
96
97 reducChkList[0] = isOrbReducible;
98 reducChkList[1] = isUPartnStabReducible;
99 reducChkList[2] = NULL;
100
101 specialRefinement[0] = malloc( sizeof(SpecialRefinementDescriptor) );
102 specialRefinement[0]->refnType = 'O';
103 specialRefinement[0]->leftGroup = G;
104 specialRefinement[0]->rightGroup = G;
105
106 specialRefinement[1] = NULL;
107 specialRefinement[2] = NULL;
108
109 initializeOrbRefine( G);
110 initializeUPartnStabRefn();
111
112 ex = extra[0] = allocExtraDomain();
113 xDegree = extra->xDegree[0] = numberOfCells( Lambda);
114 ex->xPsiStack = newCellPartitionStack( xDegree);
115 ex->xUpsilonStack = newCellPartitionStack( xDegree);
116 ex->xRRR = (Refinement *) malloc( (xDegree+2) * sizeof(Refinement) );
117 ex->applyAfter =
118 (UnsignedS *) malloc( (xDegree+2) * sizeof(UnsignedS) );
119 ex->xA_ =
120 (UnsignedS *) malloc( (xDegree+2) * sizeof(UnsignedS) );
121 ex->xB_ =
122 (UnsignedS *) malloc( (xDegree+2) * sizeof(UnsignedS) );
123 extra[1] = NULL;
124
125 return computeSubgroup( G, NULL, refnFamList, reducChkList,
126 specialRefinement, extra, L);
127 #endif
128 }
129 #undef familyParm
130
131
132 /*-------------------------- uPartnImage -----------------------------------*/
133
134 /* Function uPartnImage. Returns a new permutation in a specified group G mapping
135 a specified unordered partition Lambda to a specified unordered partition
136 Xi. The algorithm is based on Figure 9 in the paper "Permutation group algorithms
137 based on partitions" by the author. */
138
uPartnImage(PermGroup * const G,const Partition * const Lambda,const Partition * const Xi,PermGroup * const L_L,PermGroup * const L_R)139 Permutation *uPartnImage(
140 PermGroup *const G, /* The containing permutation group. */
141 const Partition *const Lambda, /* One of the partitions. */
142 const Partition *const Xi, /* The other partition. */
143 PermGroup *const L_L, /* A (possibly trivial) known subgroup of the
144 stabilizer in G of Lambda. (A null pointer
145 designates a trivial group.) */
146 PermGroup *const L_R) /* A (possibly trivial) known subgroup of the
147 stabilizer in G of Xi. (A null pointer
148 designates a trivial group.) */
149 {
150 #ifdef XXXXXX
151 RefinementFamily OOO_G, SSS_Lambda_Xi;
152 RefinementFamily *refnFamList[3];
153 ReducChkFn *reducChkList[3];
154 SpecialRefinementDescriptor *specialRefinement[3];
155
156 OOO_G.refine = orbRefine;
157 OOO_G.familyParm_L[0].ptrParm = G;
158 OOO_G.familyParm_R[0].ptrParm = G;
159
160 SSS_Lambda_Xi.refine = uPartnStabRefine;
161 SSS_Lambda_Xi.familyParm_L[0].ptrParm = (void *) Lambda;
162 SSS_Lambda_Xi.familyParm_R[0].ptrParm = (void *) Xi;
163
164 refnFamList[0] = &OOO_G;
165 refnFamList[1] = &SSS_Lambda_Xi;
166 refnFamList[2] = NULL;
167
168 reducChkList[0] = isOrbReducible;
169 reducChkList[1] = isUPartnStabReducible;
170 reducChkList[2] = NULL;
171
172 specialRefinement[0] = malloc( sizeof(SpecialRefinementDescriptor) );
173 specialRefinement[0]->refnType = 'O';
174 specialRefinement[0]->leftGroup = G;
175 specialRefinement[0]->rightGroup = G;
176
177 specialRefinement[1] = NULL;
178 specialRefinement[2] = NULL;
179
180 initializeOrbRefine( G);
181 initializeUPartnStabRefn();
182
183 return computeCosetRep( G, NULL, refnFamList, reducChkList,
184 specialRefinement, L_L, L_R);
185 #endif
186 }
187
188
189 /*-------------------------- initializePartnStabRefn ----------------------*/
190
initializePartnStabRefn(Unsigned xDegree)191 static void initializePartnStabRefn(
192 Unsigned xDegree)
193 {
194 knownIrreducible[0] = NULL;
195 currentZDepth[0] = 0;
196 totalSize = (UnsignedS *) malloc( xDegree * sizeof(UnsignedS) );
197 totalSize[1] = xDegree;
198 }
199
200
201 /*-------------------------- uPartnStabRefine ------------------------------*/
202
203 /* The function implements the refinement family uPartnStabRefine. Here
204 ssS_{Lambda,i,j,p} acting on Pi (the top partition of UpsilonStack) splits
205 from cell i of Pi those points lying in the union of the j'th cell group of
206 Pi^(p) (the top partition on extra[p]->xUpsilonStack) on Omega^(p). Note
207 Lambda is the base partition for extra[p]->xUpsilonStack
208
209 The family parameter is:
210 familyParm[0].ptrParm: extra[p]->xUpsilonStack
211 The refinement parameters are:
212 refnParm[0].intParm: i,
213 refnParm[1].intParm: j.
214 */
215
uPartnStabRefine(const RefinementParm familyParm[],const RefinementParm refnParm[],PartitionStack * const UpsilonStack)216 static SplitSize uPartnStabRefine(
217 const RefinementParm familyParm[],
218 const RefinementParm refnParm[],
219 PartitionStack *const UpsilonStack) /* The partition stack to refine. */
220 {
221 #ifdef xxxx
222 CellPartitionStack *xUpsilonStack = familyParm[0].ptrParm;
223 Partition *Lambda = xUpsilonStack->basePartn;
224 Unsigned i = refnParm[0].intParm,
225 j = refnParm[1].intParm;
226 Unsigned m, k, r, last, left, right, temp, startNewCell, pt, t;
227 UnsignedS *const pointList = UpsilonStack->pointList,
228 *const invPointList = UpsilonStack->invPointList,
229 *const parent = UpsilonStack->parent,
230 *const startCell = UpsilonStack->startCell,
231 *const cellNumber = UpsilonStack->cellNumber,
232 *const cellSize = UpsilonStack->cellSize,
233 *const xCellList = xUpsilonStack->cellList,
234 *const xCellGroupNumber = xUpsilonStack->cellGroupNumber,
235 *const xStartCellGroup = xUpsilonStack->startCellGroup,
236 *const xCellGroupSize = xUpsilonStack->cellGroupSize,
237 *const LambdaCellNumber = Lambda->cellNumber;
238 SplitSize split;
239 BOOLEAN cellSplits;
240
241 /* First check if the refinement acts nontrivially on UpsilonTop. If not
242 return immediately. */
243 cellSplits = FALSE;
244 for ( m = startCell[cellToSplit]+1 , last = m -1 + cellSize[cellToSplit] ;
245 m < last ; ++m )
246 if ( (xCellGroupNumber[LambdaCellNumber[pointList[m]]] == j) !=
247 (xCellGroupNumber[LambdaCelllNumber[pointList[m-1]]] == j) ) {
248 cellSplits = TRUE;
249 break;
250 }
251 if ( !cellSplits ) {
252 split.oldCellSize = cellSize[cellToSplit];
253 split.newCellSize = 0;
254 return split;
255 }
256
257 /* Now split cell cellToSplit of UpsilonTop. A variation of the splitting
258 algorithm used in quicksort is applied. */
259 if ( cellSize[i] <= xUpsilonStack->totalGroupSize[j] ) {
260 left = startCell[i] - 1;
261 right = startCell[i] + cellSize[i];
262 while ( left < right ) {
263 while ( xCellGroupNumber[LambdaCellNumber[pointList[++left]]] != j )
264 ;
265 while ( xCellGroupNumber[LambdaCellNumber[pointList[--right]]] == j )
266 ;
267 if ( left < right ) {
268 EXCHANGE( pointList[left], pointList[right], temp)
269 EXCHANGE( invPointList[pointList[left]], invPointList[pointList[right]], temp)
270 }
271 }
272 startNewCell = left;
273 }
274 else {
275 startNewCell = startCell[i] + cellSize[i];
276 for ( k = xStartCellGroup[j] ; k < xStartCellGroup[j] +
277 xCellGroupSize[j] ; ++k ) {
278 t = xCellList[k];
279 for ( r = Lambda->startCell[t] ;
280 LambdaCellNumber[Lambda->pointList[r]] == t ; ++r) {
281 pt = Lambda->pointList[r];
282 if ( cellNumber[pt] == i ) {
283 --startNewCell;
284 m = invPointList[pt];
285 EXCHANGE( pointList[m], pointList[startNewCell], temp);
286 EXCHANGE( invPointList[pointList[m]], invPointList[pointList[startNewCell]],
287 temp);
288 }
289 }
290 }
291 }
292
293 ++UpsilonStack->height;
294 for ( m = startNewCell ; m < last ; ++m )
295 cellNumber[pointList[m]] = UpsilonStack->height;
296 startCell[UpsilonStack->height] = startNewCell;
297 parent[UpsilonStack->height] = i;
298 cellSize[UpsilonStack->height] = last - startNewCell;
299 cellSize[cellToSplit] -= cellSize[UpsilonStack->height];
300 split.oldCellSize = cellSize[i];
301 split.newCellSize = cellSize[UpsilonStack->height];
302 return split;
303 #endif
304 }
305
306
307 /*-------------------------- initializeUPartnStabRefine --------------------------*/
308
initializeUPartnStabRefine(PermGroup * G)309 void initializeUPartnStabRefine( PermGroup *G)
310 {
311 #ifdef xxxx
312 /* Compute hash table size. */
313 if ( refnData.groupCount == 0 ) {
314 hashTableSize = G->degree;
315 while ( hashTableSize > 11 &&
316 (hashTableSize % 2 == 0 || hashTableSize % 3 == 0 ||
317 hashTableSize % 5 == 0 || hashTableSize % 7 == 0) )
318 --hashTableSize;
319 }
320
321 if ( refnData.groupCount < 9) {
322 refnData.group[refnData.groupCount] = G;
323 refnData.hashTable[refnData.groupCount] = allocPtrArrayDegree();
324 refnData.refnList[refnData.groupCount] =
325 malloc( G->degree * (sizeof(RefnListEntry)+2) );
326 if ( !refnData.refnList[refnData.groupCount] )
327 ERROR( "initializeOrbRefine", "Memory allocation error.")
328 refnData.oldLevel[refnData.groupCount] = UNKNOWN;
329 ++refnData.groupCount;
330 trueGroupCount = refnData.groupCount;
331 }
332 else
333 ERROR( "initializeOrbRefine", "Orbit refinement limited to ten groups.")
334 #endif
335 }
336
337
/*-------------------------- xPartnStabRefine ------------------------------*/

/* Restore zeroArray[] to all zeros by revisiting only the positions
   recorded in nonZeroPosition[1..nonZeroCount], then clear the count.
   Deliberately NOT wrapped in do {...} while (0): existing call sites use
   the macro both with and without a trailing semicolon, and the wrapper
   would break the semicolon-free use. */
#define RESTORE_ZERO for ( r = 1 ; r <= nonZeroCount ; ++r ) \
                        zeroArray[nonZeroPosition[r]] = 0; \
                     nonZeroCount = 0;

/* The function below implements the refinement family xPartnStabRefine.
   Here xssS_{Pi,i,j,m,p} acting on Pi^(p) (the top partition of
   PiStack^(p)) splits from cell group i of Pi^(p) those cells intersecting
   cell j of Pi in exactly m points.

   The family parameter is:
         familyParm[0].ptrParm:  extra[p]->xUpsilonStack
   The refinement parameters are:
         refnParm[0].intParm:    i,
         refnParm[1].intParm:    j,
         refnParm[2].intParm:    m.
*/
356
/* NOTE(review): the function below is an unfinished draft, entirely disabled
   by "#ifdef xxxx", and does not compile as written.  Known problems, left
   verbatim (only comments added): the first line is fused with an
   extraction artifact; undeclared names are referenced
   (nonZeroPositionCount, nonSplitFlag vs. the declared noSplitFlag,
   cellToSplit, xInvCellList, UNDEFINED); zeroArrayDegree, a scalar, is
   indexed as an array; braces are mismatched around the if/else that
   begins at "if ( xUpsilonStack->totalGroupSize[i] <= cellSize[j] )" and
   around the final for-loop; several conditions carry a stray ')'; two
   loops increment the wrong variable (++j, ++m where ++r is meant); and an
   assignment near the end is missing its semicolon.  It also reads
   familyParm[1].ptrParm although the family-parameter comment above says
   familyParm[0]. */
partnStabRefine(const RefinementParm familyParm[],const RefinementParm refnParm[],PartitionStack * const UpsilonStack)357 static SplitSize partnStabRefine(
358 const RefinementParm familyParm[],
359 const RefinementParm refnParm[],
360 PartitionStack *const UpsilonStack) /* The partition stack to refine. */
361 {
362 #ifdef xxxx
363 static Unsigned zeroArrayDegree = 0;
364 static Unsigned nonZeroCount;
365 static UnsignedS *zeroArray = NULL;
366 static UnsignedS *nonZeroPosition = NULL;
/* NOTE(review): familyParm[1] here; the comment above says familyParm[0]. */
367 PartitionStack *xUpsilonStack = familyParm[1].ptrParm;
368 Partition *Lambda = xUpsilonStack->basePartn;
369 Unsigned i = refnParm[0].intParm,
370 j = refnParm[1].intParm,
371 m = refnParm[2].intParm;
372 Unsigned t, last, k, r, temp, left, right, xStartNewCellGroup;
373 UnsignedS *const pointList = UpsilonStack->pointList,
374 *const invPointList = UpsilonStack->invPointList,
375 *const parent = UpsilonStack->parent,
376 *const startCell = UpsilonStack->startCell,
377 *const cellNumber = UpsilonStack->cellNumber,
378 *const cellSize = UpsilonStack->cellSize,
379 *const xCellList = xUpsilonStack->cellList,
380 *const xCellGroupNumber = xUpsilonStack->cellGroupNumber,
381 *const xStartCellGroup = xUpsilonStack->startCellGroup,
382 *const xCellGroupSize = xUpsilonStack->cellGroupSize,
383 *const LambdaCellNumber = Lambda->cellNumber;
384 SplitSize split;
385 BOOLEAN splitFlag, noSplitFlag;
386
/* (Re)allocate the scratch arrays when the cell count changes. */
387 if ( zeroArrayDegree != Lambda->cellCount ) {
388 if ( zeroArrayDegree != 0 ) {
389 free( zeroArray );
390 free( nonZeroPosition);
391 }
392 zeroArrayDegree = Lambda->cellCount;
393 zeroArray = malloc( (zeroArrayDegree+2)*sizeof(UnsignedS) );
394 for ( r = 1 ; r <= zeroArrayDegree ; ++r )
/* NOTE(review): presumably zeroArray[r] = 0; zeroArrayDegree is a scalar. */
395 zeroArrayDegree[r] = 0;
/* NOTE(review): presumably nonZeroCount = 0; this name is undeclared. */
396 nonZeroPositionCount = 0;
397 nonZeroPosition = malloc( (zeroArrayDegree+2)*sizeof(UnsignedS) );
398 }
399
400 /* Here we set zeroArray[j] to the cardinality of
401 (cell j of UpsilonTop) intersect (cell k of Lambda) for each k in
402 cell group i of xUpsilonTop. */
403 if ( xUpsilonStack->totalGroupSize[i] <= cellSize[j] )
404 for ( k = xStartCellGroup[i] ; k < xStartCellGroup[i] +
405 xCellGroupSize[i] ; ++k ) {
406 for ( r = Lambda->startCell[k] ; r <= Lambda->startCell[k] +
407 Lambda->cellSize[k] ; ++ r ) {
408 if ( cellNumber[Lambda->pointList[r]] == j ) {
409 if ( zeroArray[k] == 0 )
410 nonZeroPosition[++nonZeroCount] = k;
411 ++zeroArray[k];
412 }
413 }
/* NOTE(review): a closing brace for the k-loop is missing before this else. */
414 else
/* NOTE(review): ++j should presumably be ++r. */
415 for ( r = startCell[j] ; r < startCell[j]+cellSize[j] ; ++j ) {
/* NOTE(review): stray ')'. */
416 t = LambdaCellNumber[pointList[r]] );
417 if ( xCellGroupNumber[t] == i ) {
418 if ( zeroArray[t] == 0 )
419 nonZeroPosition[++nonZeroCount] = t;
420 ++zeroArray[t];
421 }
422 }
423
424 /* Now reset zeroArray and return immediately if the cell group does not
425 split. */
/* NOTE(review): nonSplitFlag (here and below) vs. the declared noSplitFlag. */
426 splitFlag = nonSplitFlag = FALSE;
427 if ( xUpsilonStack->cellCount > nonZeroCount )
428 if ( m == 0 )
429 splitFlag = TRUE;
430 else
431 nonSplitFlag = TRUE;
432 for ( r = 1 ; r <= nonZeroCount && !splitFlag ; ++r )
433 if ( zeroArray[nonZeroPosition[r]] == m )
434 splitFlag = TRUE;
435 for ( r = 1 ; r <= nonZeroCount && !nonSplitFlag ; ++r )
436 if ( zeroArray[nonZeroPosition[r]] != m )
437 nonSplitFlag = TRUE;
438 if ( !splitFlag || !nonSplitFlag ) {
439 RESTORE_ZERO
/* NOTE(review): cellToSplit is undeclared; the quantity reported should
   presumably concern cell group i of xUpsilonTop. */
440 split.oldCellSize = cellSize[cellToSplit];
441 split.newCellSize = 0;
442 return split;
443 }
444
445 /* Now split cell group i of xUpsilonTop. A variation of the splitting
446 algorithm used in quicksort is applied. */
447 last = xStartCellGroup[i] + xCellGroupSize[i];
448 if ( xUpsilonStack->cellGroupSize[i] <= cellSize[j] ) {
449 left = xStartCellGroup[i] - 1;
450 right = xStartCellGroup[i] + xCellGroupSize[i];
451 while ( left < right ) {
/* NOTE(review): stray ')' in the next two loop conditions. */
452 while ( zeroArray[++left]) != m )
453 ;
454 while ( zeroArray[--right]) == m )
455 ;
456 if ( left < right ) {
457 EXCHANGE( xCellList[left], xCellList[right], temp)
/* NOTE(review): xInvCellList is not declared anywhere in this file. */
458 EXCHANGE( xInvCellList[xCellList[left]],
459 xInvCellList[xCellList[right]], temp)
460 }
/* NOTE(review): this assignment sits inside the while loop; in the analogous
   quicksort-style split in uPartnStabRefine it follows the loop. */
461 xStartNewCellGroup = left;
462 }
463 }
464 else {
465 xStartNewCellGroup = last;
466 for ( r = startCell[j] ; r < startCell[j] + cellSize[j] ; ++r ) {
467 t = LambdaCellNumber[pointList[r]];
468 if ( zeroArray[t] == m ) {
469 zeroArray[t] = UNDEFINED;
470 --xStartNewCellGroup;
471 EXCHANGE( xCellList[t], xCellList[xStartNewCellGroup], temp)
472 EXCHANGE( xInvCellList[xCellList[t]],
473 xInvCellList[xCellList[xStartNewCellGroup]], temp)
474 }
475 }
476 }
477
478 ++xUpsilonStack->height;
479 xUpsilonStack->totalGroupSize[xUpsilonStack->height] = 0;
/* NOTE(review): ++m should presumably be ++r; the loop brace below never
   closes, lines after the += look intended to follow the loop, and the
   assignment ending at RESTORE_ZERO lacks its semicolon. */
480 for ( r = xStartNewCellGroup ; r < last ; ++m ) {
481 xCellGroupNumber[xCellList[r]] = xUpsilonStack->height;
482 xUpsilonStack->totalGroupSize[xUpsilonStack->height] +=
483 Lambda->cellSize[xCellList[r]];
484 xStartCellGroup[xUpsilonStack->height] = xStartNewCellGroup;
485 xUpsilonStack->parent[xUpsilonStack->height] = i;
486 xCellGroupSize[xUpsilonStack->height] = last - xStartNewCellGroup;
487 xCellGroupSize[i] -= (last - xStartNewCellGroup);
488 xUpsilonStack->totalGroupSize[i] -=
489 xUpsilonStack->totalGroupSize[xUpsilonStack->height]
490 RESTORE_ZERO;
491 split.oldCellSize = cellSize[cellToSplit];
492 split.newCellSize = cellSize[UpsilonStack->height];
493 return split;
494 #endif
495 }
496
497 #ifdef xxxxxx
498 /*-------------------------- deleteRefnListEntry --------------------------*/
499
500 static void deleteRefnListEntry(
501 RefnListEntry *entryToDelete,
502 Unsigned hashPosition, /* hashTableSize+1 indicates unknown */
503 RefnListEntry *prevHashListEntry)
504 {
505 if ( hashPosition > hashTableSize ) {
506 hashPosition = HASH( entryToDelete->i, entryToDelete->j);
507 if ( hashTable[hashPosition] == entryToDelete )
508 prevHashListEntry = NULL;
509 else {
510 prevHashListEntry = hashTable[hashPosition];
511 while ( prevHashListEntry->hashLink != entryToDelete )
512 prevHashListEntry = prevHashListEntry->hashLink;
513 }
514 }
515 if ( prevHashListEntry )
516 prevHashListEntry->hashLink = entryToDelete->hashLink;
517 else
518 hashTable[hashPosition] = entryToDelete->hashLink;
519 if ( entryToDelete->last )
520 entryToDelete->last->next = entryToDelete->next;
521 else
522 inUseListHeader = entryToDelete->next;
523 if ( entryToDelete->next )
524 entryToDelete->next->last = entryToDelete->last;
525 entryToDelete->next = freeListHeader;
526 freeListHeader = entryToDelete;
527 }
528
529
530 /*-------------------------- isUPartnStabReducible ------------------------*/
531
532 RefinementPriorityPair isOrbReducible(
533 const RefinementFamily *family, /* The refinement family mapping
534 must be orbRefine; family parm[0]
535 is the group. */
536 const PartitionStack *const UpsilonStack)
537 {
538 BOOLEAN cellWillSplit;
539 Unsigned i, j, m, groupNumber, hashPosition, newCellNumber, oldCellNumber,
540 count;
541 unsigned long minPriority, thisPriority;
542 UnsignedS *oldLevelAddr;
543 UnsignedS *const pointList = UpsilonStack->pointList,
544 *const startCell = UpsilonStack->startCell,
545 *const cellSize = UpsilonStack->cellSize;
546 PermGroup *G = family->familyParm_L[0].ptrParm;
547 Unsigned level = family->familyParm_L[1].intParm;
548 UnsignedS *orbNumberOfPt = G->orbNumberOfPt[level];
549 UnsignedS *const startOfOrbitNo = G->startOfOrbitNo[level];
550 RefinementPriorityPair reducingRefn;
551 RefnListEntry *refnList;
552 RefnListEntry *p, *oldP, *position, *minPosition;
553
554 /* Check that the refinement mapping really is pointStabRefn, as required,
555 and that the group is one for which initializeOrbRefine has been
556 called. */
557 if ( family->refine != orbRefine )
558 ERROR( "isOrbReducible", "Error: incorrect refinement mapping");
559 for ( groupNumber = 0 ; groupNumber < refnData.groupCount &&
560 refnData.group[groupNumber] != G ;
561 ++groupNumber )
562 ;
563 if ( groupNumber >= refnData.groupCount )
564 ERROR( "isOrbReducible", "Routine not initialized for group.")
565 hashTable = refnData.hashTable[groupNumber];
566 refnList = refnData.refnList[groupNumber];
567 oldLevelAddr = &refnData.oldLevel[groupNumber];
568
569 /* If this is a new level, we reconstruct the list of potential refinements
570 from scratch. */
571 if ( level != *oldLevelAddr ) {
572
573 /* Initialize data structures. */
574 *oldLevelAddr = level;
575 for ( i = 0 ; i < hashTableSize ; ++i )
576 hashTable[i] = NULL;
577 freeListHeader = &refnList[0];
578 inUseListHeader = NULL;
579 for ( i = 0 ; i < G->degree ; ++i)
580 refnList[i].next = &refnList[i+1];
581 refnList[G->degree].next = NULL;
582
583 /* Process the i'th cell of the top partition for i = 1,2,...., finding all
584 possible refinements. */
585 for ( i = 1 ; i <= UpsilonStack->height ; ++i ) {
586
587 /* First check if the i'th cell will split. If not, proceed directly
588 to the next cell. */
589 for ( m = startCell[i]+1 , cellWillSplit = FALSE ;
590 m < startCell[i] + cellSize[i] && !cellWillSplit ; ++m )
591 if ( orbNumberOfPt[ pointList[m] ] !=
592 orbNumberOfPt[ pointList[m-1] ] )
593 cellWillSplit = TRUE;
594 if ( !cellWillSplit )
595 continue;
596
597 /* Now find all splittings of the i'th cell and insert them into the
598 list in sorted order. */
599 for ( m = startCell[i] ; m < startCell[i]+cellSize[i] ; ++m ) {
600 j = orbNumberOfPt[pointList[m]];
601 hashPosition = HASH( i, j);
602 p = hashTable[hashPosition];
603 while ( p && (p->i != i || p->j != j) )
604 p = p->hashLink;
605 if ( p )
606 ++p->newCellSize;
607 else {
608 if ( !freeListHeader )
609 ERROR( "isOrbReducible",
610 "Refinement list exceeded bound (should not occur).")
611 p = freeListHeader;
612 freeListHeader = freeListHeader->next;
613 p->next = inUseListHeader;
614 if ( inUseListHeader )
615 inUseListHeader->last = p;
616 p->last = NULL;
617 inUseListHeader = p;
618 p->hashLink = hashTable[hashPosition];
619 hashTable[hashPosition] = p;
620 p->i = i;
621 p->j = j;
622 p->newCellSize = 1;
623 }
624 }
625 }
626 }
627
628 /* If this is not a new level, we merely fix up the old list. The entries
629 for the new cell must be created and those for its parent must be
630 adjusted. */
631 else {
632 freeListHeader = refnData.freeListHeader[groupNumber];
633 inUseListHeader = refnData.inUseListHeader[groupNumber];
634 newCellNumber = UpsilonStack->height;
635 oldCellNumber = UpsilonStack->parent[UpsilonStack->height];
636 for ( m = startCell[newCellNumber] , cellWillSplit = FALSE ;
637 m < startCell[newCellNumber] + cellSize[newCellNumber] ; ++m ) {
638 if ( m > startCell[newCellNumber] &&
639 orbNumberOfPt[pointList[m]] != orbNumberOfPt[pointList[m-1]] )
640 cellWillSplit = TRUE;
641 j = orbNumberOfPt[pointList[m]];
642 hashPosition = HASH( oldCellNumber, j);
643 p = hashTable[hashPosition];
644 oldP = NULL;
645 while ( p && (p->i != oldCellNumber || p->j != j) ) {
646 oldP = p;
647 p = p->hashLink;
648 }
649 if ( p ) {
650 --p->newCellSize;
651 if ( p->newCellSize == 0 )
652 deleteRefnListEntry( p, hashPosition, oldP);
653 }
654 }
655 if ( cellWillSplit )
656 for ( m = startCell[newCellNumber] , cellWillSplit = FALSE ;
657 m < startCell[newCellNumber] + cellSize[newCellNumber] ; ++m ) {
658 hashPosition = HASH( newCellNumber, j);
659 p = hashTable[hashPosition];
660 while ( p && (p->i != newCellNumber || p->j != j) )
661 p = p->hashLink;
662 if ( p )
663 ++p->newCellSize;
664 else {
665 if ( !freeListHeader )
666 ERROR( "isOrbReducible",
667 "Refinement list exceeded bound (should not occur).")
668 p = freeListHeader;
669 freeListHeader = freeListHeader->next;
670 p->next = inUseListHeader;
671 if ( inUseListHeader )
672 inUseListHeader->last = p;
673 p->last = NULL;
674 inUseListHeader = p;
675 p->hashLink = hashTable[hashPosition];
676 hashTable[hashPosition] = p;
677 p->i = newCellNumber;
678 p->j = j;
679 p->newCellSize = 1;
680 }
681 }
682 }
683
684 /* Now we return a refinement of minimal priority. While searching the
685 list, we also check for refinements invalidated by previous splittings. */
686 minPosition = inUseListHeader;
687 minPriority = ULONG_MAX;
688 count = 1;
689 for ( position = inUseListHeader ; position && count < 100 ;
690 position = position->next , ++count ) {
691 while ( position && position->newCellSize == cellSize[position->i] ) {
692 p = position;
693 position = position->next;
694 deleteRefnListEntry( p, hashTableSize+1, NULL);
695 }
696 if ( !position )
697 break;
698 if ( (thisPriority = (unsigned long) position->newCellSize +
699 MIN( cellSize[position->i], SIZE_OF_ORBIT(position->j) )) <
700 minPriority ) {
701 minPriority = thisPriority;
702 minPosition = position;
703 }
704 }
705 if ( minPriority == ULONG_MAX )
706 reducingRefn.priority = IRREDUCIBLE;
707 else {
708 reducingRefn.refn.family = (RefinementFamily *)(family);
709 reducingRefn.refn.refnParm[0].intParm = minPosition->i;
710 reducingRefn.refn.refnParm[1].intParm = minPosition->j;
711 reducingRefn.priority = thisPriority;
712 }
713
714 /* If this is the last call to isOrbReducible for this group (UpsilonStack
715 has height degree-1), free memory and reinitialize. */
716 if ( UpsilonStack->height == G->degree - 1 ) {
717 freePtrArrayDegree( refnData.hashTable[groupNumber]);
718 free( refnData.refnList[groupNumber]);
719 refnData.group[groupNumber] = NULL;
720 --trueGroupCount;
721 if ( trueGroupCount == 0 )
722 refnData.groupCount = 0;
723 }
724
725 refnData.freeListHeader[groupNumber] = freeListHeader;
726 refnData.inUseListHeader[groupNumber] =inUseListHeader;
727 return reducingRefn;
728 }
729
730
731 cstExtraRBase()
732 #endif
733