/*  This file is part of Conauto.

    Conauto is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    Conauto is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Conauto.  If not, see <http://www.gnu.org/licenses/>.
*/

/* Data structures and functions to generate sequences of partitions. */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <stdint.h>

#include "refinements.h"
#include "seqpart.h"
#include "sort.h"
#include "groups.h"
#include "graph.h"

Partition *p_global; /* needed to compute cell types inside the 'qsort' comparison routine */

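/* qsort comparator: orders cells by increasing size, using the partition
   referenced by the global 'p_global' (which is set before every qsort call). */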
int cmp_cell_size ( const void *a, const void *b )
{
	const uint16_t *c1 = (uint16_t*)a;
	const uint16_t *c2 = (uint16_t*)b;

	return (p_global->start[*c1 + 1] - p_global->start[*c1]) - (p_global->start[*c2 + 1] - p_global->start[*c2]);
}

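/* Collect one representative cell per cell type (same attributes and same size)
   among the cells with non-zero attributes, and return them sorted by size. */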
uint16_t compute_cell_types ( int level, SeqPart *sp, uint16_t *valid_cells )
{
	uint64_t vct[sp->partition[level].noc];	/* valid cell types */
	uint8_t valid[sp->partition[level].noc];	/* validity flag of each cell */
	uint16_t c, cc;
	uint16_t nof_valid;

	nof_valid = 0;
/*
	for ( c = 0; c < sp->partition[level].noc; c++ )
		valid[c] = ( sp->partition[level].attrib[c] != 0 );
	for ( c = 0; c < sp->partition[level].noc; c++ )
		vct[c] = ( ( ( sp->partition[level].attrib[c] ) & UINT64_C(0xffffffffffff0000) ) | ( (uint64_t)(sp->partition[level].start[c+1] - sp->partition[level].start[c]) & UINT64_C(0x000000000000ffff) ) );
	for ( c = 0; c < sp->partition[level].noc; c++ )
		if ( valid[c] )
			for ( cc = (uint16_t)(c+1); cc < sp->partition[level].noc; cc++ )
				if ( vct[c] == vct[cc] )
					valid[cc] = FALSE;
	for ( c = 0; c < sp->partition[level].noc; c++ )
		if ( valid[c] )
			valid_cells[nof_valid++] = c;
*/
	for ( c = 0; c < sp->partition[level].noc; c++ )
	{
		valid[c] = ( sp->partition[level].attrib[c] != 0 );
		vct[c] = ( ( ( sp->partition[level].attrib[c] ) & UINT64_C(0xffffffffffff0000) ) | ( (uint64_t)(sp->partition[level].start[c+1] - sp->partition[level].start[c]) & UINT64_C(0x000000000000ffff) ) );
	}
	for ( c = 0; c < sp->partition[level].noc; c++ )
		if ( valid[c] )
		{
			for ( cc = (uint16_t)(c+1); cc < sp->partition[level].noc; cc++ )
				if ( vct[c] == vct[cc] )
					valid[cc] = FALSE;
			valid_cells[nof_valid++] = c;
		}
/**/
	p_global = &(sp->partition[level]);
	qsort(valid_cells, nof_valid, sizeof ( uint16_t ), cmp_cell_size );
	return nof_valid;
}

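/* Try each candidate pivot cell type: the partition is refined virtually (at
   levels level+2 and level+3, see the INFO comment below) through
   multirefinement(), and the cell whose individualization splits the partition
   into the most cells is kept, with ties broken by the smallest pivot cell size. */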
uint16_t best_individualized_cell ( const struct graph *g, SeqPart *sp, int level )
{
	uint16_t start[2][g->num_vert+1];
	uint16_t discarded_cells[g->num_vert];
	uint8_t affected[g->num_vert];
	uint16_t vc[sp->partition[level].noc];	/* valid cells */
	uint16_t nof_valid;	/* number of valid cells */
	int reached_level;
	uint16_t c, best_cell;
	uint16_t best_noc;	/* maximum number of cells */
	uint16_t best_size;	/* minimum pivot cell size */
	uint16_t i;

/* INFO:
	level:		partition level to refine
	level+1:	new partition level (may have been allocated previously)
	level+2:	virtual (reached level - 1) partition
	level+3:	virtual reached-level partition
*/
	nof_valid = compute_cell_types ( level, sp, vc );
	best_cell = 0;
	best_noc = 0;
	best_size = UINT16_MAX;
	sp->partition[level+2].start = start[0];
	sp->partition[level+3].start = start[1];
	/* if new level has not been allocated yet */
	if ( sp->partition[level+1].vertices == NULL )
	{
		sp->partition[level].discarded_cells = discarded_cells;
		sp->partition[level].affected = affected;
	}
	for ( i = 0; i < nof_valid; i++ )
	{
		sp->dcs_nodes++;
		sp->partition[level].pivot = c = vc[i];
		reached_level = multirefinement ( level, sp, g );
		if ( sp->partition[level+3].start[sp->partition[level+3].noc] == sp->partition[level+3].noc )
		{
			best_cell = c;
			break;
		}
		if ( is_subpartition( sp, level+3, level ) )
		{
			best_cell = c;
			break;
		}
		/* if it splits more cells */
		if ( sp->partition[level+3].noc + ( sp->partition[level].start[sp->partition[level].noc] - sp->partition[level+3].start[sp->partition[level+3].noc] ) > best_noc )
		{
			best_noc = (uint16_t)( sp->partition[level+3].noc + (uint16_t)( sp->partition[level].start[sp->partition[level].noc] - sp->partition[level+3].start[sp->partition[level+3].noc] ) );
			best_cell = c;
			best_size = (uint16_t)(sp->partition[level].start[sp->partition[level].pivot+1] - sp->partition[level].start[sp->partition[level].pivot] );
		}
		else
			/* if it splits the same number of cells, but the initial pivot cell is smaller */
			if ( ( ( sp->partition[level+3].noc + ( sp->partition[level].start[sp->partition[level].noc] - sp->partition[level+3].start[sp->partition[level+3].noc] ) ) == best_noc ) && ( best_size > (uint16_t)(sp->partition[level].start[sp->partition[level].pivot+1] - sp->partition[level].start[sp->partition[level].pivot] ) ) )
			{
				best_cell = c;
				best_size = (uint16_t)(sp->partition[level].start[sp->partition[level].pivot+1] - sp->partition[level].start[sp->partition[level].pivot] );
			}
	}
	sp->partition[level+2].vertices = NULL;
	sp->partition[level+3].vertices = NULL;
	return best_cell;
}

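/* Select the pivot cell for the next refinement: among the still-valid cells,
   prefer the smallest one, breaking ties by the largest attribute.  When no valid
   cell remains, either pick the best cell to individualize (PCS) or fall back to
   the smallest non-valid cell that still has links. */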
uint16_t best_pivot ( const struct graph *g, SeqPart *sp, int level, uint8_t choose_not_valid )
{
	uint16_t best;
	uint16_t i;
	uint16_t best_size;
	uint16_t best_attrib;
	Partition *p = &(sp->partition[level]);

	best = 0;
	best_size = p->start[1];
	best_attrib = (uint16_t)p->attrib[0];
	for ( i = 1; i < p->noc; i++ )
		if ( sp->valid_old[i] )
			if ( !sp->valid_old[best] )
			{
				best = i;
				best_size = (uint16_t) ( p->start[i+1] - p->start[i] );
				best_attrib = (uint16_t)p->attrib[i];
			}
			else
				if ( best_size > ( p->start[i+1] - p->start[i] ) )
				{
					best = i;
					best_size = (uint16_t) ( p->start[i+1] - p->start[i] );
					best_attrib = (uint16_t)p->attrib[i];
				}
				else
					if ( ( best_size == ( p->start[i+1] - p->start[i] ) ) && ( best_attrib < (uint16_t)p->attrib[i] ) )
					{
						best = i;
						best_attrib = (uint16_t)p->attrib[i];
					}
					else;
#ifdef PCS
		else;
	/* choosing the best pivot cell among the non-valid cells */
	if ( choose_not_valid && !sp->valid_old[best] && sp->partition[level].noc > 1 )
		return best_individualized_cell ( g, sp, level );
#else
		else
			if ( !sp->valid_old[best] && (uint16_t)p->attrib[i] )
				if ( best_attrib == 0 || best_size > (p->start[i+1] - p->start[i]) )
				{
					best = i;
					best_size = (uint16_t)(p->start[i+1] - p->start[i]);
					best_attrib = (uint16_t)(p->attrib[i]);
				}
#endif
	return ( best );
}

void mark_cells_with_no_links ( Partition *p )
{
	uint16_t c; /* cell */

	p->nodc = 0;
	for ( c = 0; c < p->noc; c++ )
		if ( (uint16_t)p->attrib[c] == 0 )
			p->discarded_cells[p->nodc++] = c;
}

void free_partitions ( SeqPart *sp, int level )
{
	int cl;	/* current level */
	Partition *cp; /* current partition */
	Partition *pp; /* previous partition */

	cp = NULL;
	for ( cl = level; sp->partition[cl].vertices != NULL; cl++ )
	{
		cp = &(sp->partition[cl]);
		pp = &(sp->partition[cl-1]);
		free ( pp->affected );
		free ( pp->discarded_cells );
		free ( cp->vertices );
		free ( cp->belongs );
		free ( cp->start );
		free ( cp->attrib );
		if ( cp->adeg != NULL )
			free ( cp->adeg );
		cp->vertices = NULL;
		if ( cp->mm_collection != NULL )
		{
			free ( cp->mm_collection->mismatch_hash );
			free ( cp->mm_collection->counters );
			free ( cp->mm_collection );
			cp->mm_collection = NULL;
		}
		if ( cp->go != NULL )
		{
			free ( cp->go->vertices );
			free ( cp->go->orbits );
			free ( cp->go );
			cp->go = NULL;
		}
	}
	free ( cp->discarded_cells ); /* warning: only valid if the partitions are freed up to the LAST level, so that cp is not NULL */
}

void allocate_partition ( SeqPart *sp, int level, uint16_t noc, uint16_t num_vert )
{
	Partition *cp; /* current partition */
	Partition *pp; /* previous partition */

	cp = &(sp->partition[level]);
	pp = &(sp->partition[level-1]);
	if ( cp->vertices == NULL )
	{
		pp->affected = malloc ( noc * sizeof ( uint8_t ) );
		pp->discarded_cells = malloc ( noc * sizeof ( uint16_t ) );
		cp->vertices = malloc ( num_vert * sizeof ( uint16_t ) );
		cp->belongs = malloc ( sp->partition[0].start[sp->partition[0].noc] * sizeof ( uint16_t ) );
		cp->start = calloc ( (size_t)( num_vert + 1 ), sizeof ( uint16_t ) );
		cp->attrib = malloc ( num_vert * sizeof ( uint64_t ) );
		cp->adeg = malloc ( num_vert * sizeof ( uint64_t ) );
	}
}

void free_seqpart ( SeqPart *sp )
{
	free_partitions ( sp, 1 );
	free ( sp->partition[0].vertices );
	free ( sp->partition[0].belongs );
	free ( sp->partition[0].start );
	free ( sp->partition[0].attrib );
	if ( sp->partition[0].adeg != NULL )
		free ( sp->partition[0].adeg );
	sp->partition[0].vertices = NULL;
	if ( sp->partition[0].mm_collection != NULL )
	{
		free ( sp->partition[0].mm_collection->mismatch_hash );
		free ( sp->partition[0].mm_collection->counters );
		free ( sp->partition[0].mm_collection );
		sp->partition[0].mm_collection = NULL;
	}
	if ( sp->partition[0].go != NULL )
	{
		free ( sp->partition[0].go->vertices );
		free ( sp->partition[0].go->orbits );
		free ( sp->partition[0].go );
		sp->partition[0].go = NULL;
	}
	free ( sp->partition );
	free ( sp->discarded_vert );
	free ( sp->valid_new );
	free ( sp->valid_old );
	free ( sp->num_adj );
}

void allocate_seqpart ( SeqPart *sp, uint32_t size )
{
	Partition *cp;

	sp->partition = calloc ( (size_t)( size * 2 ), sizeof ( Partition ) );
	sp->nodv = 0;
	sp->discarded_vert = malloc ( size * sizeof ( uint16_t ) );
	sp->valid_new = malloc ( size * sizeof ( uint8_t ) );
	sp->valid_old = malloc ( size * sizeof ( uint8_t ) );
	sp->num_adj = malloc ( size * sizeof ( uint64_t ) );
	sp->nodes = 1;
	sp->nodes_limit = UINT32_MAX;
	sp->dcs_nodes = 0;
	sp->bad_nodes = 0;
	cp = &(sp->partition[0]);
	cp->vertices = malloc ( size * sizeof ( uint16_t ) );
	cp->belongs = malloc ( size * sizeof ( uint16_t ) );
	cp->start = calloc ( (size_t)( size + 1 ), sizeof ( uint16_t ) );
	cp->attrib = malloc ( size * sizeof ( uint64_t ) );
	cp->adeg = malloc ( size * sizeof ( uint64_t ) );
}

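/* Build the level-0 partition: vertices are sorted by degree and grouped into one
   cell per distinct degree.  A cell is valid if its degree is non-zero (cell 0
   additionally requires the partition to have more than one cell). */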
void generate_degree_partition ( SeqPart *sp, const struct graph *g )
{
	uint16_t v;
	uint64_t adeg; /* current degree of the cell processed */
	uint16_t noc; /* number of cells */
	Partition *p = &(sp->partition[0]);

	sp->partition[0].nopdv = 0;
	for ( v = 0; v < g->num_vert; v++ )
		p->vertices[v] = v;
	sort_cell ( g->num_vert, p->vertices, g->deg );
	p->start[0] = 0;
	p->belongs[p->vertices[0]] = 0;
	adeg = g->deg[p->vertices[0]];
	p->attrib[0] = adeg;
	sp->valid_old[0] = ( adeg > 0 );
	noc = 1;
	for ( v = 1; v < g->num_vert; v++ )
	{
		if ( g->deg[p->vertices[v]] != adeg )
		{
			p->start[noc] = v;
			adeg = g->deg[p->vertices[v]];
			p->attrib[noc] = adeg;
			sp->valid_old[noc] = ( adeg > 0 );
			noc++;
		}
		p->belongs[p->vertices[v]] = (uint16_t) ( noc - 1 );
	}
	p->start[noc] = v;
	sp->valid_old[0] = sp->valid_old[0] && ( noc > 1 );
	p->noc = noc;
}

void exchange_partitions ( Partition *p1, Partition *p2 )
{
	EXCHANGE( p1->attrib, p2->attrib, uint64_t* );
	EXCHANGE( p1->adeg, p2->adeg, uint64_t* );
	EXCHANGE( p1->vertices, p2->vertices, uint16_t* );
	EXCHANGE( p1->belongs, p2->belongs, uint16_t* );
	EXCHANGE( p1->start, p2->start, uint16_t* );
	EXCHANGE( p1->discarded_cells, p2->discarded_cells, uint16_t* );
	EXCHANGE( p1->affected, p2->affected, uint8_t* );
	EXCHANGE( p1->pivot, p2->pivot, uint16_t );
	EXCHANGE( p1->noc, p2->noc, uint16_t );
	EXCHANGE( p1->nodc, p2->nodc, uint16_t );
	EXCHANGE( p1->nopdv, p2->nopdv, uint16_t );
	EXCHANGE( p1->tor, p2->tor, uint8_t );
}

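/* Main loop that builds the sequence of partitions: while some cell contains more
   than one vertex, pick a pivot cell and apply a vertex, set, or individualization
   (UNKNOWN) refinement; stop when only singleton cells or cells with no links
   remain, which become the LAST partition. */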
void generate_seqpart ( SeqPart *sp, const struct graph *g, int level )
{
	uint16_t pc; /* pivot cell */
	uint8_t success;
	uint16_t bck;

	while ( sp->partition[level].start[sp->partition[level].noc] > sp->partition[level].noc )
	{

		pc = sp->partition[level].pivot = best_pivot ( g, sp, level, TRUE );

		if ( (uint16_t)sp->partition[level].attrib[pc] == 0 )
			break;
		level++;
		allocate_partition ( sp, level, sp->partition[level-1].noc, sp->partition[level-1].start[sp->partition[level-1].noc] );
		/* VERTEX */
		if ( ( sp->partition[level-1].start[pc+1] - sp->partition[level-1].start[pc] ) == 1 )
		{
			vertex_refinement ( sp, g, level-1 );
			sp->partition[level-1].tor = VERTEX;
			free ( sp->partition[level].adeg );
			sp->partition[level].adeg = NULL;
		}
		/* SET */
		else if ( sp->valid_old[pc] ) /* SET */
		{
			sp->valid_old[pc] = FALSE;
			bck = sp->nodv;
			set_refinement ( sp, g, level-1, &success );
			sp->partition[level-1].tor = SET;
			if ( ! success )
			{
				level--;
				sp->nodv = bck;
				continue;
			}
		}
		/* UNKNOWN: individualization */
		else
		{
			vertex_refinement ( sp, g, level-1 );
			sp->partition[level-1].tor = UNKNOWN;
			free ( sp->partition[level].adeg );
			sp->partition[level].adeg = NULL;
			sp->nodes++;
		}
		EXCHANGE( sp->valid_old, sp->valid_new, uint8_t* );
	}
	sp->partition[level].tor = LAST;
	sp->partition[level+1].start = NULL;
	sp->partition[level].discarded_cells = malloc ( sp->partition[level].noc * sizeof ( uint16_t ) );
	mark_cells_with_no_links ( &(sp->partition[level]) );
	sp->lp = level;
	memcpy ( sp->discarded_vert + sp->nodv, sp->partition[level].vertices, sp->partition[level].start[sp->partition[level].noc] * sizeof (uint16_t ) );
}

int degree_partitions_are_compatible ( Partition *p1, Partition *p2 )
{
	int compatible;
	uint16_t c;

	for ( compatible = ( p1->noc == p2->noc ), c = 0; compatible && c < p1->noc; c++ )
		compatible = ( p1->start[c] == p2->start[c] && p1->attrib[c] == p2->attrib[c] );
	return compatible;
}

/* int is_not_worth ( SeqPart *sp, int bl, int level ) */
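/* Return TRUE when no two cells with links of the partition at 'level' fall inside
   the same cell of the coarser partition at level 'bl' (sub-partition test). */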
int is_subpartition ( SeqPart *sp, int level, int bl )
{
	const Partition *p = &(sp->partition[level]);
	uint16_t c1, c2;

	c1 = 0;
	c2 = 1;
	while ( c2 < p->noc )
	{
		if ( (uint16_t)p->attrib[c1] == 0 )
		{
			c1++;
			c2++;
		}
		else if ( (uint16_t)p->attrib[c2] == 0 )
			c2++;
		else if ( sp->partition[bl].belongs[p->vertices[p->start[c1]]] == sp->partition[bl].belongs[p->vertices[p->start[c2]]] )
			return FALSE;
		else
		{
			c1 = c2;
			c2++;
		}
	}
	return TRUE;
}

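/* compute_auto_search_limit stores, for an UNKNOWN level, the first deeper UNKNOWN
   level whose partition is a sub-partition of it.  compute_backtrack_levels stores,
   for each level, the level to backtrack to: with BKJ (backjumping) it is the
   nearest shallower UNKNOWN level of which the current partition is not a
   sub-partition, otherwise simply the previous level. */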
void compute_auto_search_limit ( SeqPart *sp, int level )
{
	int target_level;

	target_level = level+1;
	while ( target_level < sp->lp && ( sp->partition[target_level].tor != UNKNOWN || !is_subpartition ( sp, target_level, level ) ) )
		target_level++;
	sp->partition[level].auto_search_limit = target_level;
}

void compute_auto_search_limits ( SeqPart *sp )
{
	int level;

	for ( level = 0; level < sp->lp; level++ )
		if ( sp->partition[level].tor == UNKNOWN )
			compute_auto_search_limit ( sp, level );
}

void compute_backtrack_levels ( SeqPart *sp )
{
	int level;
	int bl; /* backtrack level */

	for ( level = sp->lp; level >= 0; level-- )
#ifdef BKJ
		if ( sp->partition[level].tor == UNKNOWN )
		{
			for ( bl = level - 1; bl >= 0 && ( sp->partition[bl].tor != UNKNOWN || is_subpartition ( sp, level, bl ) ); bl-- );
			sp->partition[level].backtrack_level = bl;
		}
		else
#endif
			sp->partition[level].backtrack_level = level-1;
}

uint16_t num_backtrack_levels ( SeqPart *sp )
{
	uint16_t count;
	uint16_t level;

	for ( count = 0, level = 0; level < sp->lp; level++ )
		count = (uint16_t) ( count + ( sp->partition[level].tor == BACKTR ) );
	return count;
}

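/* Swapping the first vertex of a cell with no links with any other vertex of that
   cell yields an automorphism; the next two functions add such transpositions to
   the permutation group (checking membership first in the "check" variant) and
   merge the corresponding orbits. */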
void process_cells_with_no_links ( Partition *p, Perm_Group *pg )
{
	uint16_t c; /* cell */
	uint16_t v;
	uint16_t previous;
	uint16_t perm[pg->nelem];
	uint16_t cell_size;

	memcpy ( perm, pg->base, pg->nelem * sizeof ( uint16_t ) );
	for ( c = 0, previous = (uint16_t) ( p->nopdv + ( p->tor == VERTEX || p->tor == UNKNOWN ) ); c < p->nodc; c++, previous = (uint16_t) (previous + cell_size ) )
	{
		cell_size = (uint16_t) ( p->start[p->discarded_cells[c]+1] - p->start[p->discarded_cells[c]] );
		for ( v = 1; v < cell_size; v++ )
		{
			perm[previous] = p->vertices[p->start[p->discarded_cells[c]] + v];
			perm[previous + v] = p->vertices[p->start[p->discarded_cells[c]]];
			set_perm ( pg, perm );
			merge_orbits ( &(pg->orbits), perm[previous], perm[previous + v] );
			perm[previous + v] = p->vertices[p->start[p->discarded_cells[c]] + v];
			perm[previous] = p->vertices[p->start[p->discarded_cells[c]]];
		}
	}
}

void check_cells_with_no_links ( Partition *p, Perm_Group *pg, uint16_t *vertices_old )
{
	uint16_t c; /* cell */
	uint16_t u, v, w;
	uint16_t perm[pg->nelem];

	memcpy ( perm, pg->base, pg->nelem * sizeof ( uint16_t ) );
	for ( c = 0; c < p->nodc; c++ )
	{
		u = pg->inv_base[vertices_old[p->start[p->discarded_cells[c]]]];
		for ( v = (uint16_t) ( p->start[p->discarded_cells[c]]+1 ); v <  p->start[p->discarded_cells[c]+1]; v++ )
		{
			w = pg->inv_base[vertices_old[v]];
			perm[u] = vertices_old[v];
			perm[w] = vertices_old[p->start[p->discarded_cells[c]]];
			if ( !perm_in_pg ( pg, perm ) )
			{
				set_perm ( pg, perm );
				merge_orbits ( &(pg->orbits), perm[u], perm[w] );
			}
			perm[w] = vertices_old[v];
			perm[u] = vertices_old[p->start[p->discarded_cells[c]]];
		}
	}
}

uint8_t equal_partitions ( int level, uint16_t *vertices, SeqPart *sp )
{
	uint16_t c; /* cell being processed */
	uint16_t v; /* vertex in the cell */

	for ( c = 0; c < sp->partition[level].noc; c++ )
		if ( (uint16_t)sp->partition[level].attrib[c] )
			for ( v = sp->partition[level].start[c]; v < sp->partition[level].start[c+1]; v++ )
				if ( sp->partition[level].vertices[v] != vertices[v] )
					return FALSE;
	return TRUE;
}

void push_perm_to_discarded_vert ( Perm_Group *pg, Permutation perm, Partition *p, uint16_t *vertices, uint32_t num_vert )
{
	const Permutation base = pg->base;
	uint16_t c;
	uint16_t v;
	uint16_t i;
	uint16_t start;
	uint16_t cell_size;

	for ( i = p->nopdv; i < num_vert; i++ )
		perm[i] = base[i];
	for ( c = 0; c < p->nodc; c++ )
	{
		start = p->start[p->discarded_cells[c]];
		cell_size = (uint16_t) ( p->start[p->discarded_cells[c]+1] - start );
		for ( v = 0; v < cell_size; v++ )
			perm[pg->inv_base[p->vertices[start + v]]] = vertices[start + v];
	}
}

/* algorithm of sub-partitions theorem */
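/* Called when the search reaches an auto search limit: the partition at 'level' is
   a sub-partition of the one at the backtrack level, so the discarded cells can be
   matched directly and the permutation is completed in discarded_vert without
   descending any further into the subtree. */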
void process_contained_cells ( int level, uint16_t *vertices, SeqPart *sp, Perm_Group *pg, int backtrack_level )
{
	const Partition *p = &(sp->partition[level]);
	uint16_t previous;
	uint16_t cell_size;
	uint16_t c;	/* cell */
	uint16_t v, w;
	uint16_t offset;
	uint16_t inv_dv[pg->nelem];	/* discarded_vert inverse perm */

	memcpy ( ( sp->discarded_vert + sp->nodv ), ( pg->base + sp->nodv ), (size_t)( pg->nelem - sp->nodv ) * sizeof ( uint16_t ) );
	/* Push the discarded vertices into discarded_vert, and compute their number (offset). */
	offset = 0;
	for ( c = 0, previous = (uint16_t)( p->nopdv + 1 ); c < p->nodc; c++, previous = (uint16_t) (previous + cell_size ) )
	{
		cell_size = (uint16_t) ( p->start[p->discarded_cells[c]+1] - p->start[p->discarded_cells[c]] );
		for ( v = 0; v < cell_size; v++ )
			sp->discarded_vert[previous+v] = vertices[p->start[p->discarded_cells[c]]+v];
		offset = (uint16_t)( offset + cell_size );
	}
	inv_perm_partialy ( sp->discarded_vert, inv_dv, pg->nelem, sp->partition[backtrack_level].nopdv, (uint16_t)(sp->nodv + offset + 1), sp->discarded_vert[sp->nodv] );
	/* fill in the new perm */
	for ( v = sp->partition[backtrack_level].nopdv; v < ( sp->partition[level].nopdv + offset + 1 ); v++ )
		if ( pg->inv_base[sp->discarded_vert[v]] >= (sp->partition[level].nopdv + offset +1) || pg->inv_base[sp->discarded_vert[v]] == sp->partition[level].nopdv )
		{
			w =  pg->base[v];
			while (inv_dv[w] != UINT16_MAX) {
				w = pg->base[inv_dv[w]];
			};
			sp->discarded_vert[pg->inv_base[sp->discarded_vert[v]]] = w;
		}
}

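/* The next three functions compare and order the orbits of the pivot cell:
   equal_partial_orbits checks that the partial orbits have the same size
   distribution as the stored orbits, and the compute_*_order functions list the
   pivot-cell vertices in an order derived from their orbit sizes (via
   sort_valid_orbits). */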
uint8_t equal_partial_orbits ( int level, SeqPart *sp, Graph_Orbits *ogo, Graph_Orbits *pgo )
{
	const uint16_t pc = sp->partition[level].pivot; /* pivot cell */
	const size_t pc_size = (size_t)(sp->partition[level].start[pc+1] - sp->partition[level].start[pc]); /* pivot cell size */
	uint16_t oo_size_count[pc_size-1];	/* number of original orbits of each size */
	uint16_t po_size_count[pc_size-1];	/* number of partial orbits of each size */
	uint8_t oo_processed[sp->partition[0].start[sp->partition[0].noc]];
	uint8_t po_processed[pc_size];
	uint16_t v;

	for ( v = 0; v < (pc_size-1); oo_size_count[v] = po_size_count[v] = 0, v++ );
	for ( v = 0; v < pc_size; po_processed[v++] = FALSE );
	for ( v = 0; v < sp->partition[0].start[sp->partition[0].noc]; oo_processed[v++] = FALSE );
	for ( v = 0; v < pc_size; v++ )
		if ( ! oo_processed[ogo->vertices[sp->partition[level].vertices[sp->partition[level].start[pc] + v]].belongs_to] )
		{
			oo_size_count[ogo->orbits[ogo->vertices[sp->partition[level].vertices[sp->partition[level].start[pc] + v]].belongs_to].size-1]++;
			oo_processed[ogo->vertices[sp->partition[level].vertices[sp->partition[level].start[pc] + v]].belongs_to] = TRUE;
		}
	for ( v = 0; v < pc_size; v++ )
		if ( ! po_processed[pgo->vertices[v].belongs_to] )
		{
			po_size_count[pgo->orbits[pgo->vertices[v].belongs_to].size-1]++;
			po_processed[pgo->vertices[v].belongs_to] = TRUE;
		}
	return !memcmp ( oo_size_count, po_size_count, (pc_size-1) * sizeof ( uint16_t ) );
}

void compute_orbits_order ( int level, SeqPart *sp, Graph_Orbits *go, uint16_t *vertices, uint16_t *valid_vert_index )
{
	const uint16_t pc = sp->partition[level].pivot; /* pivot cell */
	const uint16_t pc_size = (uint16_t) (sp->partition[level].start[pc+1] - sp->partition[level].start[pc]); /* pivot cell size */
	uint16_t i, j;
	uint16_t valid_vertices[pc_size-1];

	for ( i = 1; i < pc_size; i++ )
		valid_vertices[(uint16_t)(i-1)] = vertices[(uint16_t)(sp->partition[level].start[pc]+i)];
	sort_valid_orbits ( (uint16_t)(pc_size-1), valid_vertices, go );
	for ( i = 0; i < (uint16_t)(pc_size-1); i++ )
	{
		for ( j = 1; j < pc_size; j++ )
			if ( valid_vertices[i] == vertices[(uint16_t)(sp->partition[level].start[pc]+j)] )
			{
				valid_vert_index[(uint16_t)(pc_size-2-i)] = j;
				break;
			}
	}
}

uint16_t compute_partial_orbits_order ( int level, SeqPart *sp, Graph_Orbits *ogo, Graph_Orbits *pgo, /* uint16_t *vertices,*/ uint16_t *valid_vertices )
{
	const uint16_t pc = sp->partition[level].pivot; /* pivot cell */
	const uint16_t pc_size = (uint16_t) (sp->partition[level].start[pc+1] - sp->partition[level].start[pc]); /* pivot cell size */
	const uint16_t pc_orbitSize = ogo->orbits[ogo->vertices[sp->partition[level].vertices[sp->partition[level].start[pc]]].belongs_to].size;
	uint16_t nof_valid;
	uint16_t i;
	uint8_t po_compat; /* partial orbits compatible */

	nof_valid = 0;
	po_compat = equal_partial_orbits ( level, sp, ogo, pgo );
	for ( i = 0; i < pc_size; i++ )
		if ( po_compat )
			if (pgo->orbits[pgo->vertices[i].belongs_to].size == pc_orbitSize )
				valid_vertices[nof_valid++] = i;
			else;
		else if ( pgo->orbits[pgo->vertices[i].belongs_to].size <= pc_orbitSize )
		{
			valid_vertices[nof_valid++] = i;
		}
/*
	if ( !po_compat )
		sort_valid_orbits ( nof_valid, valid_vertices, pgo );
*/
	return nof_valid;
}

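/* Compute the orbits of the pivot-cell positions that are induced by the stored
   permutations, restricted to those permutations that fix the vertices discarded
   between the previous backtrack level and this level; permutation sets whose
   members do not fix them are split into new leaders (new_perm_sets/new_leaders). */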
void generate_partial_orbits ( SeqPart *sp, int level, uint16_t* vertices_old, Perm_Group *pg, Graph_Orbits *pgo, int prev_bl, uint16_t *discarded_vert, uint16_t *old_perm_sets, uint16_t *old_leaders, uint16_t *new_perm_sets, uint16_t *new_leaders )
{
	const uint16_t pc = sp->partition[level].pivot; /* pivot cell */
	uint16_t inv [pg->nelem];
	uint16_t i, j;
	uint16_t p1, p2;

	memcpy ( new_perm_sets, old_perm_sets, pg->nelem * sizeof ( uint16_t ) );
	memcpy ( new_leaders, old_leaders, pg->nelem * sizeof ( uint16_t ) );
	create_trivial_orbits ( pgo );
	for ( i = 0; i < pg->nelem; inv[i++] = UINT16_MAX );
	for ( i = 0; i < pgo->size; inv[vertices_old[sp->partition[level].start[pc]+i]] = i, i++ );
	for ( p1 = 0; p1 < pg->nperms; p1++ )
		if ( new_leaders[p1] )
		{
			uint16_t prev_perm_not_valid;	/* previous permutation not valid */
			uint16_t prev_perm_valid;	/* previous permutation valid */

			prev_perm_not_valid = UINT16_MAX;
			prev_perm_valid = p1;
			p2 = new_perm_sets[p1];
			while ( p2 != UINT16_MAX )
			{
				if ( fixed_in_2_perms ( pg, p1, p2, discarded_vert, sp->partition[prev_bl].nopdv, sp->partition[level].nopdv ) )
				{
					/* merge orbits with respect to both perms (p1 and p2) */
					prev_perm_valid = p2;
					for ( i = sp->partition[level].start[pc]; i < sp->partition[level].start[pc+1]; i++ )
						if ( ( j = inv[pg->perms[p2][pg->inv_perms[p1][vertices_old[i]]]] ) != UINT16_MAX )
							merge_orbits ( pgo, (uint16_t)(i - sp->partition[level].start[pc]), j );
				}
				else
				{
					/* split the permutation set */
					if ( prev_perm_not_valid == UINT16_MAX )
					{
						prev_perm_not_valid = p2;
						new_leaders[p2] = TRUE;
					}
					else
					{
						new_perm_sets[prev_perm_not_valid] = p2;
						prev_perm_not_valid = p2;
					}
					new_perm_sets[prev_perm_valid] = new_perm_sets[p2];
				}
				p2 = new_perm_sets[p2];
			}
			if ( prev_perm_not_valid != UINT16_MAX )
				new_perm_sets[prev_perm_not_valid] = UINT16_MAX;
			if ( new_perm_sets[p1] == UINT16_MAX )
				new_leaders[p1] = FALSE;
		}
}

#define CHECK_VALID( vertices, orbits ) \
{	uint16_t i; \
\
	for ( i = 0; i < w && !are_in_the_same_orbit ( valid_vertices[i], valid_vertices[w], orbits ); i++ ); \
	valid = ( i == w );\
}

int subtree_compat ( int level, uint16_t *vertices_old, const struct graph *g, SeqPart *sp, Perm_Group *pg, uint16_t *perm_sets, uint16_t *leaders, int init_bl, int prev_bl );

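/* Backtracking step at a BACKTR level: try each still-valid vertex of the pivot
   cell as the individualized vertex, pruning candidates that lie in an orbit that
   was already tried (CHECK_VALID on the partial orbits when PGO is defined) and,
   when RF is defined, stopping early once no recorded mismatch can still match. */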
int deepen_in_backtrack ( int level, uint16_t *vertices_new, uint16_t *vertices_old, const struct graph *g, SeqPart *sp, Perm_Group *pg, uint16_t *old_perm_sets, uint16_t *old_leaders, int init_bl, int prev_bl )
{
	const uint16_t pc = sp->partition[level].pivot; /* pivot cell */
	const uint16_t pc_size = (uint16_t) (sp->partition[level].start[pc+1] - sp->partition[level].start[pc]); /* pivot cell size */
	uint16_t new_perm_sets[pg->nelem];
	uint16_t new_leaders[pg->nelem];
	int backtrack_level;
	uint16_t v, w;
	uint8_t valid;
	Info_Vertex info_vertex[pc_size];
	Info_Orbit info_orbit[pc_size];
	Graph_Orbits pgo = { info_vertex, info_orbit, pc_size }; /* partial Graph_Orbits */
	uint16_t counters[pc_size];
	uint16_t valid_vertices[pc_size];
	uint16_t nof_valid;

	for ( v = 0; v < pc_size; counters[v++] = 0 );
#ifdef PGO
	generate_partial_orbits ( sp, level, vertices_old, pg, &pgo, prev_bl, sp->discarded_vert, old_perm_sets, old_leaders, new_perm_sets, new_leaders );
	nof_valid = compute_partial_orbits_order ( level, sp, sp->partition[level].go, &pgo, /* vertices_old,*/ valid_vertices );
	for ( w = 0; w < nof_valid; w++ )
	{
		v = (uint16_t)(sp->partition[level].start[pc] + valid_vertices[w]);
		CHECK_VALID( vertices_old, &pgo );
		if ( valid )
#else
	for( v = sp->partition[level].start[pc]; v < sp->partition[level].start[pc+1]; v++ )
	{
#endif
		{
			uint16_t i, n;

			sp->nodes++;
			if ( vertex_ref_compat ( sp, g, level, vertices_old[v], vertices_new, vertices_old, sp->discarded_vert ) )
			{
				if ( ( backtrack_level = subtree_compat ( level + 1, vertices_new, g, sp, pg, new_perm_sets, new_leaders, init_bl, level ) ) != level )
				{
					if( backtrack_level < level )
						sp->bad_nodes++;
					return backtrack_level;
				}
				else
					sp->bad_nodes++;
			}
			if( sp->nodes >= sp->nodes_limit )
			{
				printf( "Nodes limit:\t%"PRIu32"\nNodes:\t\t%"PRIu32"\n", sp->nodes_limit, sp->nodes );
				exit(150); /* NODES LIMIT EXCEEDED */
			}
#ifdef RF
#ifdef PGO
			for ( i = (uint16_t)(w+1), n = 1; i < nof_valid; i++ )
				if ( are_in_the_same_orbit ( valid_vertices[i], valid_vertices[w], &pgo ) )
					n++;
#else
			n = 1;
#endif
			if ( !mismatch_in_collection ( sp->partition[level].mm_collection, counters, n ) )
			{
				break;
			}
#endif
		}
	}
#ifdef RF
	mismatch_found_hash = UINT16_MAX;
#endif
	return sp->partition[level].backtrack_level;
}

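/* Recursive compatibility check for the subtree rooted at 'level': replay on
   vertices_old the same kind of refinement (VERTEX, SET, BACKTR or LAST) that was
   used when the sequence of partitions was generated, and return the level reached
   on success or the level to backtrack to when the refinements are not compatible. */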
int subtree_compat ( int level, uint16_t *vertices_old, const struct graph *g, SeqPart *sp, Perm_Group *pg, uint16_t *perm_sets, uint16_t *leaders, int init_bl, int prev_bl )
{
	uint16_t vertices_new [sp->partition[level].start[sp->partition[level].noc]];
	uint16_t p;	/* pivot vertex */
	uint16_t pc_size;	/* pivot cell size */
	int backtrack_level;

/*	check_cells_with_no_links ( &(sp->partition[level]), pg, vertices_old ); */
#ifdef EAD
	if ( equal_partitions ( level, vertices_old, sp ) )
	{
		push_perm_to_discarded_vert ( pg, sp->discarded_vert, &(sp->partition[level]), vertices_old, g->num_vert );
		return level;
	}
	if ( level == sp->partition[init_bl].auto_search_limit && sp->partition[level].tor != LAST )
	{
		process_contained_cells ( level, vertices_old, sp, pg, init_bl );
		return level;
	}
#endif
	switch ( sp->partition[level].tor )
	{
		case BACKTR:
#ifdef AGC
			return deepen_in_backtrack ( level, vertices_new, vertices_old, g, sp, pg, perm_sets, leaders, init_bl, prev_bl );
#else
			break;
#endif
		case LAST:
			if ( last_part_compat ( &(sp->partition[level]), vertices_old, g, g ) )
			{
				memcpy ( sp->discarded_vert + sp->nodv, vertices_old, sp->partition[level].start[sp->partition[level].noc] * sizeof ( uint16_t ) );
				return level;
			}
			break;
		case SET:
			if ( set_ref_compat ( sp, g, level, vertices_new, vertices_old, sp->discarded_vert ) )
				if ( ( backtrack_level = subtree_compat ( level + 1, vertices_new, g, sp, pg, perm_sets, leaders, init_bl, prev_bl ) ) != level )
					return backtrack_level;
			break;
		case VERTEX:
		default:
			p = vertices_old[sp->partition[level].start[sp->partition[level].pivot]];
			pc_size = sp->partition[level].start[sp->partition[level].pivot+1] - sp->partition[level].start[sp->partition[level].pivot];
			if ( pc_size > 1 )
				sp->nodes++;
			if ( vertex_ref_compat ( sp, g, level, p, vertices_new, vertices_old, sp->discarded_vert ) )
				if ( ( backtrack_level = subtree_compat ( level + 1, vertices_new, g, sp, pg, perm_sets, leaders, init_bl, prev_bl ) ) != level )
				{
					if( (pc_size > 1) && (backtrack_level < level) )
						sp->bad_nodes++;
					return backtrack_level;
				}
				else
					if( pc_size > 1 )
						sp->bad_nodes++;
					else;
			else;
#ifdef RF
			if ( pc_size > 1 )
				mismatch_found_hash = UINT16_MAX;
#endif
			break;
	}
	return sp->partition[level].backtrack_level;
}

int generate_automorphism ( int level, uint16_t p, uint16_t *vertices_old, const struct graph *g, SeqPart *sp, Perm_Group *pg, uint16_t *perm_sets, uint16_t *leaders )
{
	uint16_t vertices_new[sp->partition[level].start[sp->partition[level].noc]];

	if ( vertex_ref_compat ( sp, g, level, p, vertices_new, vertices_old, sp->discarded_vert ) )
		return subtree_compat ( ( level + 1 ), vertices_new, g, sp, pg, perm_sets, leaders, level, level );
	return sp->partition[level].backtrack_level;
}

#define CHECK_IF_VALID \
{	uint16_t i; \
\
	for ( i = sp->partition[level].start[pc]; i < v && !are_in_the_same_orbit ( sp->partition[level].vertices[i], sp->partition[level].vertices[v], &(pg->orbits) ); i++ ); \
	valid = ( i == v ); \
}

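/* At an UNKNOWN level, try to extend each pivot-cell vertex (other than the one
   used to generate the sequence) into an automorphism.  If every vertex yields
   one, the level keeps type VERTEX; otherwise it becomes BACKTR and the orbits
   computed so far (and, with RF, the mismatch collection) are stored for later
   backtracking.  Returns the resulting type of refinement. */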
uint8_t check_automorphism ( const struct graph *g, SeqPart *sp, Perm_Group *pg, int level )
{
	const uint16_t pc = sp->partition[level].pivot;	/* pivot cell */
	const uint16_t pc_size = (uint16_t)( sp->partition[level].start[pc+1] - sp->partition[level].start[pc] );
	const uint16_t pivot = sp->partition[level].start[pc];
	const Graph_Orbits *go = &(pg->orbits);
	uint16_t v;
	uint16_t i;
	uint8_t tor;	/* type of refinement */
	uint8_t valid;
	uint8_t count_orbits[g->num_vert];
	uint16_t perm_sets[g->num_vert];	/* permutation sets */
	uint16_t leaders[g->num_vert];	/* leader of each permutation set */
	uint16_t previous_nperms;
#ifdef RF
	uint8_t mismatch_marked[pc_size];

	for ( v = 0; v < pc_size; mismatch_marked[v++] = FALSE );
#endif
	previous_nperms = pg->nperms;
	for ( v = 0; v < (uint16_t)(pg->nperms - 1); perm_sets[v] = (uint16_t)(v + 1), v++ );
	for ( ; v < g->num_vert; perm_sets[v++] = UINT16_MAX );
	leaders[0] = TRUE;
	for ( v = 1; v < g->num_vert; leaders[v++] = FALSE );
	tor = VERTEX;
#ifdef RF
	sp->partition[level].mm_collection = malloc ( sizeof ( MismatchCollection ) );
/*	sp->partition[level].mm_collection->mismatch_hash = malloc ( pc_size * sizeof ( uint16_t ) ); */
	sp->partition[level].mm_collection->mismatch_hash = calloc ( pc_size, sizeof ( uint16_t ) );
	sp->partition[level].mm_collection->counters = calloc ( pc_size, sizeof ( uint16_t ) );
	sp->partition[level].mm_collection->nom = pc_size;
#endif
	for ( v = (uint16_t) ( pivot + 1 ); v < ( pivot + pc_size ); v++ )
	{
		CHECK_IF_VALID;
		/*CHECK_VALID ( sp->partition[level].vertices, &(pg->orbits) );*/
		if ( valid )
		{
			sp->nodes++;
			if ( generate_automorphism ( level, sp->partition[level].vertices[v], sp->partition[level].vertices, g, sp, pg, perm_sets, leaders ) > level )
			{
				add_perm ( pg, sp->discarded_vert, sp->partition[level].nopdv );
				perm_sets[pg->nperms-2] = (uint16_t)(pg->nperms - 1);
			}
			else
			{
				sp->bad_nodes++;
				tor = BACKTR;
#ifdef RF
				mark_new_mismatch ( &(sp->partition[level]), mismatch_marked, go->vertices, v );
#endif
			}
			if( sp->nodes >= sp->nodes_limit )
			{
				printf( "Nodes limit:\t%"PRIu32"\nNodes:\t\t%"PRIu32"\n", sp->nodes_limit, sp->nodes );
				exit(137); /* TIMEOUT */
			}
			for ( i = previous_nperms; i < (pg->nperms - 1); perm_sets[i] = (uint16_t)(i+1), i++ );
			previous_nperms = pg->nperms;
		}
#ifdef RF
		else
		{
			mark_old_mismatch ( &(sp->partition[level]), mismatch_marked, go->vertices, v );
		}
#endif
	}
	if ( tor == BACKTR )
	{
#ifdef RF
		compute_mismatches ( sp->partition[level].mm_collection );
#endif
		sp->partition[level].go = malloc ( sizeof ( Graph_Orbits ) );
		sp->partition[level].go->vertices = malloc ( g->num_vert * sizeof ( Info_Vertex ) );
		sp->partition[level].go->orbits = malloc ( g->num_vert * sizeof ( Info_Orbit ) );
		memcpy ( sp->partition[level].go->vertices, pg->orbits.vertices, g->num_vert * sizeof ( Info_Vertex ) );
		memcpy ( sp->partition[level].go->orbits, pg->orbits.orbits, g->num_vert * sizeof ( Info_Orbit ) );
		sp->partition[level].go->size = pg->orbits.size;
	}
#ifdef RF
	else
	{
		free ( sp->partition[level].mm_collection->mismatch_hash );
		free ( sp->partition[level].mm_collection->counters );
		free ( sp->partition[level].mm_collection );
		sp->partition[level].mm_collection = NULL;
	}
#endif
	memset ( count_orbits, 0, g->num_vert * sizeof ( uint8_t ) );
	for ( v = pivot; v < ( pivot + pc_size ); count_orbits[orbit_of(sp->partition[level].vertices[v++])] = 1 );
	for ( sp->partition[level].no = 0, v = 0; v < g->num_vert; sp->partition[level].no = (uint16_t)( sp->partition[level].no + count_orbits[v++] ) );
	return tor;
}

void find_automorphisms ( const struct graph *g, SeqPart *sp, Perm_Group *pg )
{
	int level;

	set_base ( pg, sp->discarded_vert );
	for ( level = sp->lp; level >= 0; level-- )
	{
		process_cells_with_no_links ( &(sp->partition[level]), pg );
		if ( sp->partition[level].tor == UNKNOWN )
			sp->partition[level].tor = check_automorphism ( g, sp, pg, level );
	}
}

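/* Search for an isomorphism between g and h that follows the sequence of
   partitions built for g: at each level the same type of refinement is replayed
   on h, with backtracking over the pivot cell at BACKTR levels.  Returns the level
   reached when the last partition is compatible, otherwise the backtrack level. */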
int match ( int level, uint16_t *old_vertices, const struct graph *h, Perm_Group *pg, const struct graph *g, SeqPart *sp, Graph_Orbits *go, uint16_t *iso, uint16_t *perm_sets, uint16_t *leaders )
{
	const uint16_t pc = sp->partition[level].pivot;	/* pivot cell */
	const uint16_t pc_size = (uint16_t)(sp->partition[level].start[pc+1] - sp->partition[level].start[pc]); /* pivot cell size */
	uint16_t new_vertices[sp->partition[level].start[sp->partition[level].noc]];
	uint16_t v;
	int result;

	switch ( sp->partition[level].tor )
	{
		case LAST:
			if ( last_part_compat ( &(sp->partition[level]), old_vertices, g, h ) )
				return ( level );
			break;
		case VERTEX:
			v = sp->partition[level].start[pc];
			if ( vertex_ref_compat ( sp, h, level, old_vertices[v], new_vertices, old_vertices, iso ) )
				if ( ( result = match ( (level + 1), new_vertices, h, pg, g, sp, go, iso, perm_sets, leaders ) ) != level )
					return ( result );
#ifdef RF
			if ( pc_size > 1 )
				mismatch_found_hash = UINT16_MAX;
#endif
			break;
		case SET:
			if ( set_ref_compat ( sp, h, level, new_vertices, old_vertices, iso ) )
				if ( ( result = match ( (level + 1), new_vertices, h, pg, g, sp, go, iso, perm_sets, leaders ) ) != level )
					return ( result );
			break;
		case BACKTR:
		{
			uint8_t valid;
#ifdef RF
			uint16_t counters[pc_size];	/* counters of mismatches */
#endif
			uint16_t new_perm_sets[pg->nelem];
			uint16_t new_leaders[pg->nelem];
			uint16_t w;
#ifdef AGC
			uint16_t valid_vertices[pc_size];
			uint16_t nof_valid;	/* number of valid vertices */
			Info_Vertex info_vertex[pc_size];
			Info_Orbit info_orbit[pc_size];
			Graph_Orbits pgo = { info_vertex, info_orbit, pc_size }; /* partial Graph_Orbits */

/*			if ( sp->partition[level].no != pc_size ) */
			generate_partial_orbits ( sp, level, old_vertices, pg, &pgo, 0, iso, perm_sets, leaders, new_perm_sets, new_leaders );
#endif
#ifdef RF
			for ( v = 0; v < pc_size; counters[v++] = 0 );
#endif
/*			for ( v = sp->partition[level].start[pc]; v < sp->partition[level].start[pc+1]; v++ ) */
#ifdef AGC
			nof_valid = compute_partial_orbits_order ( level, sp, sp->partition[level].go, &pgo, /*old_vertices,*/ valid_vertices );
			for ( w = 0; w < nof_valid; w++ )
#else
			for ( w = 0; w < pc_size; w++ )
#endif
			{
#ifdef AGC
				v = (uint16_t)(sp->partition[level].start[pc] + valid_vertices[w]);
#else
				uint16_t i;

				v = (uint16_t)(sp->partition[level].start[pc] + w);
#endif
#ifdef AGC
				valid = ( pg->orbits.orbits[pg->orbits.vertices[old_vertices[v]].belongs_to].size == orbit_size(sp->partition[level].vertices[sp->partition[level].start[pc]]) );
				if ( valid && ( sp->partition[level].no != pc_size ) )
					CHECK_VALID( old_vertices, &pgo );
#else
				for ( i = 0; i < w && !are_in_the_same_orbit ( old_vertices[sp->partition[level].start[pc]+i], old_vertices[v], go ); i++ );
				valid = ( i == w );
#endif
				if ( valid )
				{
					if ( vertex_ref_compat ( sp, h, level, old_vertices[v], new_vertices, old_vertices, iso ) )
						if ( ( result = match ( ( level + 1 ), new_vertices, h, pg, g, sp, go, iso, new_perm_sets, new_leaders ) ) != level )
							return ( result );
#ifdef RF
					if ( !mismatch_in_collection ( sp->partition[level].mm_collection, counters, 1 ) )
						break;
#endif
				}
			}
#ifdef RF
			mismatch_found_hash = UINT16_MAX;
#endif
			break;
		}
	}
	return ( sp->partition[level].backtrack_level );
}