1 /* $Id$ $Revision$ */
2 /* vim:set shiftwidth=4 ts=8: */
3
4 /*************************************************************************
5 * Copyright (c) 2011 AT&T Intellectual Property
6 * All rights reserved. This program and the accompanying materials
7 * are made available under the terms of the Eclipse Public License v1.0
8 * which accompanies this distribution, and is available at
9 * http://www.eclipse.org/legal/epl-v10.html
10 *
11 * Contributors: See CVS logs. Details at http://www.graphviz.org/
12 *************************************************************************/
13
14
15 /*
16  * dot_mincross(g) takes a ranked graph and finds an ordering
17  * that avoids edge crossings. Clusters are expanded.
18 * N.B. the rank structure is global (not allocated per cluster)
19 * because mincross may compare nodes in different clusters.
20 */
21
22 #include "dot.h"
23
24 /* #define DEBUG */
25 #define MARK(v) (ND_mark(v))
26 #define saveorder(v) (ND_coord(v)).x
27 #define flatindex(v) ND_low(v)
28
29 /* forward declarations */
30 static boolean medians(graph_t * g, int r0, int r1);
31 static int nodeposcmpf(node_t ** n0, node_t ** n1);
32 static int edgeidcmpf(edge_t ** e0, edge_t ** e1);
33 static void flat_breakcycles(graph_t * g);
34 static void flat_reorder(graph_t * g);
35 static void flat_search(graph_t * g, node_t * v);
36 static void init_mincross(graph_t * g);
37 static void merge2(graph_t * g);
38 static void init_mccomp(graph_t * g, int c);
39 static void cleanup2(graph_t * g, int nc);
40 static int mincross_clust(graph_t * par, graph_t * g, int);
41 static int mincross(graph_t * g, int startpass, int endpass, int);
42 static void mincross_step(graph_t * g, int pass);
43 static void mincross_options(graph_t * g);
44 static void save_best(graph_t * g);
45 static void restore_best(graph_t * g);
46 static adjmatrix_t *new_matrix(int i, int j);
47 static void free_matrix(adjmatrix_t * p);
48 static int ordercmpf(int *i0, int *i1);
49 #ifdef DEBUG
50 #if DEBUG > 1
51 static int gd_minrank(Agraph_t *g) {return GD_minrank(g);}
52 static int gd_maxrank(Agraph_t *g) {return GD_maxrank(g);}
53 static rank_t *gd_rank(Agraph_t *g, int r) {return &GD_rank(g)[r];}
54 static int nd_order(Agnode_t *v) { return ND_order(v); }
55 #endif
56 void check_rs(graph_t * g, int null_ok);
57 void check_order(void);
58 void check_vlists(graph_t * g);
59 void node_in_root_vlist(node_t * n);
60 #endif
61
62
63 /* mincross parameters */
64 static int MinQuit;
65 static double Convergence;
66
67 static graph_t *Root;
68 static int GlobalMinRank, GlobalMaxRank;
69 static edge_t **TE_list;
70 static int *TI_list;
71 static boolean ReMincross;
72
73 #if DEBUG > 1
74 static void indent(graph_t* g)
75 {
76 if (g->parent) {
77 fprintf (stderr, " ");
78 indent(g->parent);
79 }
80 }
81
82 static char* nname(node_t* v)
83 {
84 static char buf[1000];
85 if (ND_node_type(v)) {
86 if (ND_ranktype(v) == CLUSTER)
87 sprintf (buf, "v%s_%p", agnameof(ND_clust(v)), v);
88 else
89 sprintf (buf, "v_%p", v);
90 } else
91 sprintf (buf, "%s", agnameof(v));
92 return buf;
93 }
94 static void dumpg (graph_t* g)
95 {
96 int j, i, r;
97 node_t* v;
98 edge_t* e;
99
100 fprintf (stderr, "digraph A {\n");
101 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
102 fprintf (stderr, " subgraph {rank=same ");
103 for (i = 0; i < GD_rank(g)[r].n; i++) {
104 v = GD_rank(g)[r].v[i];
105 if (i > 0)
106 fprintf (stderr, " -> %s", nname(v));
107 else
108 fprintf (stderr, "%s", nname(v));
109 }
110 if (i > 1) fprintf (stderr, " [style=invis]}\n");
111 else fprintf (stderr, " }\n");
112 }
113 for (r = GD_minrank(g); r < GD_maxrank(g); r++) {
114 for (i = 0; i < GD_rank(g)[r].n; i++) {
115 v = GD_rank(g)[r].v[i];
116 for (j = 0; (e = ND_out(v).list[j]); j++) {
117 fprintf (stderr, "%s -> ", nname(v));
118 fprintf (stderr, "%s\n", nname(aghead(e)));
119 }
120 }
121 }
122 fprintf (stderr, "}\n");
123 }
124 static void dumpr (graph_t* g, int edges)
125 {
126 int j, i, r;
127 node_t* v;
128 edge_t* e;
129
130 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
131 fprintf (stderr, "[%d] ", r);
132 for (i = 0; i < GD_rank(g)[r].n; i++) {
133 v = GD_rank(g)[r].v[i];
134 fprintf (stderr, "%s(%.02f,%d) ", nname(v), saveorder(v),ND_order(v));
135 }
136 fprintf (stderr, "\n");
137 }
138 if (edges == 0) return;
139 for (r = GD_minrank(g); r < GD_maxrank(g); r++) {
140 for (i = 0; i < GD_rank(g)[r].n; i++) {
141 v = GD_rank(g)[r].v[i];
142 for (j = 0; (e = ND_out(v).list[j]); j++) {
143 fprintf (stderr, "%s -> ", nname(v));
144 fprintf (stderr, "%s\n", nname(aghead(e)));
145 }
146 }
147 }
148 }
149 #endif
150
151 typedef struct {
152 Agrec_t h;
153 int x, lo, hi;
154 Agnode_t* np;
155 } info_t;
156
157 #define ND_x(n) (((info_t*)AGDATA(n))->x)
158 #define ND_lo(n) (((info_t*)AGDATA(n))->lo)
159 #define ND_hi(n) (((info_t*)AGDATA(n))->hi)
160 #define ND_np(n) (((info_t*)AGDATA(n))->np)
161 #define ND_idx(n) (ND_order(ND_np(n)))
162
163 static void
164 emptyComp (graph_t* sg)
165 {
166 Agnode_t* n;
167 Agnode_t* nxt;
168
169 for (n = agfstnode(sg); n; n = nxt) {
170 nxt = agnxtnode (sg, n);
171 agdelnode(sg,n);
172 }
173 }
174
175 #define isBackedge(e) (ND_idx(aghead(e)) > ND_idx(agtail(e)))
176
177 static Agnode_t*
178 findSource (Agraph_t* g, Agraph_t* sg)
179 {
180 Agnode_t* n;
181
182 for (n = agfstnode(sg); n; n = agnxtnode(sg, n))
183 if (agdegree(g,n,1,0) == 0) return n;
184 return NULL;
185 }
186
187 static int
188 topsort (Agraph_t* g, Agraph_t* sg, Agnode_t** arr)
189 {
190 Agnode_t* n;
191 Agedge_t* e;
192 Agedge_t* nxte;
193 int cnt = 0;
194
195 while ((n = findSource(g, sg))) {
196 arr[cnt++] = ND_np(n);
197 agdelnode(sg, n);
198 for (e = agfstout(g, n); e; e = nxte) {
199 nxte = agnxtout(g, e);
200 agdeledge(g, e);
201 }
202 }
203 return cnt;
204 }
205
206 static int
207 getComp (graph_t* g, node_t* n, graph_t* comp, int* indices)
208 {
209 int backedge = 0;
210 Agedge_t* e;
211
212 ND_x(n) = 1;
213 indices[agnnodes(comp)] = ND_idx(n);
214 agsubnode(comp, n, 1);
215 for (e = agfstout(g,n); e; e = agnxtout(g,e)) {
216 if (isBackedge(e)) backedge++;
217 if (!ND_x(aghead(e)))
218 backedge += getComp(g, aghead(e), comp, indices);
219 }
220 for (e = agfstin(g,n); e; e = agnxtin(g,e)) {
221 if (isBackedge(e)) backedge++;
222 if (!ND_x(agtail(e)))
223 backedge += getComp(g, agtail(e), comp, indices);
224 }
225 return backedge;
226 }
227
228 /* fixLabelOrder:
229  * For each pair of label nodes whose order spans are disjoint, we add an edge to force their relative order. If this creates a back edge with respect to the current order, the affected component is topologically re-sorted and its node orders reassigned.
230  */
231 static void
232 fixLabelOrder (graph_t* g, rank_t* rk)
233 {
234 int cnt, haveBackedge = FALSE;
235 Agnode_t** arr;
236 int* indices;
237 Agraph_t* sg;
238 Agnode_t* n;
239 Agnode_t* nxtp;
240 Agnode_t* v;
241
242 for (n = agfstnode(g); n; n = nxtp) {
243 v = nxtp = agnxtnode(g, n);
244 for (; v; v = agnxtnode(g, v)) {
245 if (ND_hi(v) <= ND_lo(n)) {
246 haveBackedge = TRUE;
247 agedge(g, v, n, NULL, 1);
248 }
249 else if (ND_hi(n) <= ND_lo(v)) {
250 agedge(g, n, v, NULL, 1);
251 }
252 }
253 }
254 if (!haveBackedge) return;
255
256 sg = agsubg(g, "comp", 1);
257 arr = N_NEW(agnnodes(g), Agnode_t*);
258 indices = N_NEW(agnnodes(g), int);
259
260 for (n = agfstnode(g); n; n = agnxtnode(g,n)) {
261 if (ND_x(n) || (agdegree(g,n,1,1) == 0)) continue;
262 if (getComp(g, n, sg, indices)) {
263 int i, sz = agnnodes(sg);
264 cnt = topsort (g, sg, arr);
265 assert (cnt == sz);
266 qsort(indices, cnt, sizeof(int), (qsort_cmpf)ordercmpf);
267 for (i = 0; i < sz; i++) {
268 ND_order(arr[i]) = indices[i];
269 rk->v[indices[i]] = arr[i];
270 }
271 }
272 emptyComp(sg);
273 }
274 free (arr);
    free (indices);
275 }
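
/* Illustrative sketch (not compiled): the span test used in fixLabelOrder
 * above, restated as a standalone helper. The name label_order_forced and
 * its int-pair interface are hypothetical, for exposition only. Two label
 * nodes are unconstrained only if their [lo,hi] spans overlap; disjoint
 * spans force a left-to-right order, which fixLabelOrder records as an edge.
 */
#if 0
/* returns -1 if a must precede b, +1 if b must precede a, 0 if unconstrained */
static int label_order_forced(int lo_a, int hi_a, int lo_b, int hi_b)
{
    if (hi_a <= lo_b) return -1;   /* a's span lies entirely before b's */
    if (hi_b <= lo_a) return  1;   /* b's span lies entirely before a's */
    return 0;                      /* spans overlap: no order is forced */
}
#endif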
276
277 /* checkLabelOrder:
278 * Check that the ordering of labels for flat edges is consistent.
279 * This is necessary because dot_position will attempt to force the label
280 * to be between the edge's vertices. This can lead to an infeasible problem.
281 *
282 * We check each rank for any flat edge labels (as dummy nodes) and create a
283 * graph with a node for each label. If the graph contains more than 1 node, we
284 * call fixLabelOrder to see if there really is a problem and, if so, fix it.
285 */
286 void
287 checkLabelOrder (graph_t* g)
288 {
289 int j, r, lo, hi;
290 graph_t* lg = NULL;
291 char buf[BUFSIZ];
292 rank_t* rk;
293 Agnode_t* u;
294 Agnode_t* n;
295 Agedge_t* e;
296
297 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
298 rk = GD_rank(g)+r;
299 for (j = 0; j < rk->n; j++) {
300 u = rk->v[j];
301 if ((e = (edge_t*)ND_alg(u))) {
302 if (!lg) lg = agopen ("lg", Agstrictdirected, 0);
303 sprintf (buf, "%d", j);
304 n = agnode(lg, buf, 1);
305 agbindrec(n, "info", sizeof(info_t), 1);
306 lo = ND_order(aghead(ND_out(u).list[0]));
307 hi = ND_order(aghead(ND_out(u).list[1]));
308 if (lo > hi) {
309 int tmp;
310 tmp = lo;
311 lo = hi;
312 hi = tmp;
313 }
314 ND_lo(n) = lo;
315 ND_hi(n) = hi;
316 ND_np(n) = u;
317 }
318 }
319 if (lg) {
320 if (agnnodes(lg) > 1) fixLabelOrder (lg, rk);
321 agclose(lg);
322 lg = NULL;
323 }
324 }
325 }
326
327 /* dot_mincross:
328 * Minimize edge crossings
329 * Note that nodes are not placed into GD_rank(g) until mincross()
330 * is called.
331 */
332 void dot_mincross(graph_t * g, int doBalance)
333 {
334 int c, nc;
335 char *s;
336
337 init_mincross(g);
338
339 for (nc = c = 0; c < GD_comp(g).size; c++) {
340 init_mccomp(g, c);
341 nc += mincross(g, 0, 2, doBalance);
342 }
343
344 merge2(g);
345
346 /* run mincross on contents of each cluster */
347 for (c = 1; c <= GD_n_cluster(g); c++) {
348 nc += mincross_clust(g, GD_clust(g)[c], doBalance);
349 #ifdef DEBUG
350 check_vlists(GD_clust(g)[c]);
351 check_order();
352 #endif
353 }
354
355 if ((GD_n_cluster(g) > 0)
356 && (!(s = agget(g, "remincross")) || (mapbool(s)))) {
357 mark_lowclusters(g);
358 ReMincross = TRUE;
359 nc = mincross(g, 2, 2, doBalance);
360 #ifdef DEBUG
361 for (c = 1; c <= GD_n_cluster(g); c++)
362 check_vlists(GD_clust(g)[c]);
363 #endif
364 }
365 cleanup2(g, nc);
366 }
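
/* Illustrative sketch (not compiled): after dot_mincross returns, each rank
 * array of the graph holds its nodes in final left-to-right order, and
 * ND_order(v) gives a node's index within its rank. The helper name
 * dump_order is hypothetical; it only reads fields filled in above.
 */
#if 0
static void dump_order(graph_t * g)
{
    int r, i;
    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
	for (i = 0; i < GD_rank(g)[r].n; i++) {
	    node_t *v = GD_rank(g)[r].v[i];
	    assert(ND_order(v) == i);
	    fprintf(stderr, "rank %d pos %d: %s\n", r, i, agnameof(v));
	}
    }
}
#endif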
367
368 static adjmatrix_t *new_matrix(int i, int j)
369 {
370 adjmatrix_t *rv = NEW(adjmatrix_t);
371 rv->nrows = i;
372 rv->ncols = j;
373 rv->data = N_NEW(i * j, char);
374 return rv;
375 }
376
377 static void free_matrix(adjmatrix_t * p)
378 {
379 if (p) {
380 free(p->data);
381 free(p);
382 }
383 }
384
385 #define ELT(M,i,j) (M->data[((i)*M->ncols)+(j)])
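
/* Illustrative sketch (not compiled): how the adjmatrix_t above is used.
 * It is a dense nrows-by-ncols byte matrix; ELT(M,i,j) != 0 records that
 * the flat node with flatindex i must stay to the left of the one with
 * flatindex j (see flat_search and left2right). The function name
 * adjmatrix_example is hypothetical.
 */
#if 0
static void adjmatrix_example(void)
{
    adjmatrix_t *M = new_matrix(3, 3);  /* 3 flat nodes on one rank */
    ELT(M, 0, 2) = 1;                   /* node 0 must precede node 2 */
    assert(ELT(M, 0, 2) == 1);          /* constraint visible to left2right */
    free_matrix(M);
}
#endif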
386
387 static void init_mccomp(graph_t * g, int c)
388 {
389 int r;
390
391 GD_nlist(g) = GD_comp(g).list[c];
392 if (c > 0) {
393 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
394 GD_rank(g)[r].v = GD_rank(g)[r].v + GD_rank(g)[r].n;
395 GD_rank(g)[r].n = 0;
396 }
397 }
398 }
399
400 static int betweenclust(edge_t * e)
401 {
402 while (ED_to_orig(e))
403 e = ED_to_orig(e);
404 return (ND_clust(agtail(e)) != ND_clust(aghead(e)));
405 }
406
407 static void do_ordering_node (graph_t * g, node_t* n, int outflag)
408 {
409 int i, ne;
410 node_t *u, *v;
411 edge_t *e, *f, *fe;
412 edge_t **sortlist = TE_list;
413
414 if (ND_clust(n))
415 return;
416 if (outflag) {
417 for (i = ne = 0; (e = ND_out(n).list[i]); i++)
418 if (!betweenclust(e))
419 sortlist[ne++] = e;
420 } else {
421 for (i = ne = 0; (e = ND_in(n).list[i]); i++)
422 if (!betweenclust(e))
423 sortlist[ne++] = e;
424 }
425 if (ne <= 1)
426 return;
427 /* write null terminator at end of list.
428 requires +1 in TE_list allocation */
429 sortlist[ne] = 0;
430 qsort(sortlist, ne, sizeof(sortlist[0]), (qsort_cmpf) edgeidcmpf);
431 for (ne = 1; (f = sortlist[ne]); ne++) {
432 e = sortlist[ne - 1];
433 if (outflag) {
434 u = aghead(e);
435 v = aghead(f);
436 } else {
437 u = agtail(e);
438 v = agtail(f);
439 }
440 if (find_flat_edge(u, v))
441 return;
442 fe = new_virtual_edge(u, v, NULL);
443 ED_edge_type(fe) = FLATORDER;
444 flat_edge(g, fe);
445 }
446 }
447
448 static void do_ordering(graph_t * g, int outflag)
449 {
450 /* Order all nodes in graph */
451 node_t *n;
452
453 for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
454 do_ordering_node (g, n, outflag);
455 }
456 }
457
458 static void do_ordering_for_nodes(graph_t * g)
459 {
460 /* Order nodes which have the "ordering" attribute */
461 node_t *n;
462 const char *ordering;
463
464 for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
465 if ((ordering = late_string(n, N_ordering, NULL))) {
466 if (streq(ordering, "out"))
467 do_ordering_node(g, n, TRUE);
468 else if (streq(ordering, "in"))
469 do_ordering_node(g, n, FALSE);
470 else if (ordering[0])
471 agerr(AGERR, "ordering '%s' not recognized for node '%s'.\n", ordering, agnameof(n));
472 }
473 }
474 }
475
476 /* ordered_edges:
477 * handle case where graph specifies edge ordering
478 * If the graph does not have an ordering attribute, we then
479 * check for nodes having the attribute.
480 * Note that, in this implementation, the value of G_ordering
481 * dominates the value of N_ordering.
482 */
483 static void ordered_edges(graph_t * g)
484 {
485 char *ordering;
486
487 if (!G_ordering && !N_ordering)
488 return;
489 if ((ordering = late_string(g, G_ordering, NULL))) {
490 if (streq(ordering, "out"))
491 do_ordering(g, TRUE);
492 else if (streq(ordering, "in"))
493 do_ordering(g, FALSE);
494 else if (ordering[0])
495 agerr(AGERR, "ordering '%s' not recognized.\n", ordering);
496 }
497 else
498 {
499 graph_t *subg;
500
501 for (subg = agfstsubg(g); subg; subg = agnxtsubg(subg)) {
502 /* clusters are processed by separate calls to ordered_edges */
503 if (!is_cluster(subg))
504 ordered_edges(subg);
505 }
506 if (N_ordering) do_ordering_for_nodes (g);
507 }
508 }
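
/* Illustrative note: the attribute handling above corresponds to dot input
 * such as the following (example only; the "ordering" attribute is shown at
 * graph level and at node level, and the graph-level setting dominates):
 *
 *     digraph G {
 *         ordering=out;            // out-edges of every node keep file order
 *         a -> b; a -> c; a -> d;
 *     }
 *
 *     digraph H {
 *         n [ordering=in];         // only in-edges of n keep file order
 *         x -> n; y -> n;
 *     }
 */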
509
510 static int mincross_clust(graph_t * par, graph_t * g, int doBalance)
511 {
512 int c, nc;
513
514 expand_cluster(g);
515 ordered_edges(g);
516 flat_breakcycles(g);
517 flat_reorder(g);
518 nc = mincross(g, 2, 2, doBalance);
519
520 for (c = 1; c <= GD_n_cluster(g); c++)
521 nc += mincross_clust(g, GD_clust(g)[c], doBalance);
522
523 save_vlist(g);
524 return nc;
525 }
526
527 static int left2right(graph_t * g, node_t * v, node_t * w)
528 {
529 adjmatrix_t *M;
530 int rv;
531
532 /* CLUSTER indicates orig nodes of clusters, and vnodes of skeletons */
533 if (ReMincross == FALSE) {
534 if ((ND_clust(v) != ND_clust(w)) && (ND_clust(v)) && (ND_clust(w))) {
535 /* the following allows cluster skeletons to be swapped */
536 if ((ND_ranktype(v) == CLUSTER)
537 && (ND_node_type(v) == VIRTUAL))
538 return FALSE;
539 if ((ND_ranktype(w) == CLUSTER)
540 && (ND_node_type(w) == VIRTUAL))
541 return FALSE;
542 return TRUE;
543 /*return ((ND_ranktype(v) != CLUSTER) && (ND_ranktype(w) != CLUSTER)); */
544 }
545 } else {
546 if ((ND_clust(v)) != (ND_clust(w)))
547 return TRUE;
548 }
549 M = GD_rank(g)[ND_rank(v)].flat;
550 if (M == NULL)
551 rv = FALSE;
552 else {
553 if (GD_flip(g)) {
554 node_t *t = v;
555 v = w;
556 w = t;
557 }
558 rv = ELT(M, flatindex(v), flatindex(w));
559 }
560 return rv;
561 }
562
563 static int in_cross(node_t * v, node_t * w)
564 {
565 register edge_t **e1, **e2;
566 register int inv, cross = 0, t;
567
568 for (e2 = ND_in(w).list; *e2; e2++) {
569 register int cnt = ED_xpenalty(*e2);
570
571 inv = ND_order((agtail(*e2)));
572
573 for (e1 = ND_in(v).list; *e1; e1++) {
574 t = ND_order(agtail(*e1)) - inv;
575 if ((t > 0)
576 || ((t == 0)
577 && ( ED_tail_port(*e1).p.x > ED_tail_port(*e2).p.x)))
578 cross += ED_xpenalty(*e1) * cnt;
579 }
580 }
581 return cross;
582 }
583
584 static int out_cross(node_t * v, node_t * w)
585 {
586 register edge_t **e1, **e2;
587 register int inv, cross = 0, t;
588
589 for (e2 = ND_out(w).list; *e2; e2++) {
590 register int cnt = ED_xpenalty(*e2);
591 inv = ND_order(aghead(*e2));
592
593 for (e1 = ND_out(v).list; *e1; e1++) {
594 t = ND_order(aghead(*e1)) - inv;
595 if ((t > 0)
596 || ((t == 0)
597 && ((ED_head_port(*e1)).p.x > (ED_head_port(*e2)).p.x)))
598 cross += ((ED_xpenalty(*e1)) * cnt);
599 }
600 }
601 return cross;
602
603 }
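
/* Illustrative sketch (not compiled): the counting rule behind in_cross and
 * out_cross above, restated on plain integer endpoint positions with unit
 * penalties (port tie-breaking ignored). The helper name pair_crossings and
 * its array interface are hypothetical. With v immediately to the left of w,
 * an edge of v and an edge of w cross exactly when v's endpoint lies
 * strictly to the right of w's endpoint on the adjacent rank.
 */
#if 0
static int pair_crossings(const int *v_ends, int nv, const int *w_ends, int nw)
{
    int i, j, cross = 0;
    for (i = 0; i < nv; i++)
	for (j = 0; j < nw; j++)
	    if (v_ends[i] > w_ends[j])
		cross++;
    return cross;
}
#endif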
604
605 static void exchange(node_t * v, node_t * w)
606 {
607 int vi, wi, r;
608
609 r = ND_rank(v);
610 vi = ND_order(v);
611 wi = ND_order(w);
612 ND_order(v) = wi;
613 GD_rank(Root)[r].v[wi] = v;
614 ND_order(w) = vi;
615 GD_rank(Root)[r].v[vi] = w;
616 }
617
618 static void balanceNodes(graph_t * g, int r, node_t * v, node_t * w)
619 {
620 node_t *s; /* separator node */
621 int sepIndex = 0;
622 int nullType; /* type of null nodes */
623 int cntDummy = 0, cntOri = 0;
624 int k = 0, m = 0, k1 = 0, m1 = 0, i = 0;
625
626 /* we only consider v and w of different types */
627 if (ND_node_type(v) == ND_node_type(w))
628 return;
629
630 /* count the number of dummy and original nodes */
631 for (i = 0; i < GD_rank(g)[r].n; i++) {
632 if (ND_node_type(GD_rank(g)[r].v[i]) == NORMAL)
633 cntOri++;
634 else
635 cntDummy++;
636 }
637
638 if (cntOri < cntDummy) {
639 if (ND_node_type(v) == NORMAL)
640 s = v;
641 else
642 s = w;
643 } else {
644 if (ND_node_type(v) == NORMAL)
645 s = w;
646 else
647 s = v;
648 }
649
650 /* get the separator node index */
651 for (i = 0; i < GD_rank(g)[r].n; i++) {
652 if (GD_rank(g)[r].v[i] == s)
653 sepIndex = i;
654 }
655
656 nullType = (ND_node_type(s) == NORMAL) ? VIRTUAL : NORMAL;
657
658 /* count the number of null nodes to the left and
659 * right of the separator node
660 */
661 for (i = sepIndex - 1; i >= 0; i--) {
662 if (ND_node_type(GD_rank(g)[r].v[i]) == nullType)
663 k++;
664 else
665 break;
666 }
667
668 for (i = sepIndex + 1; i < GD_rank(g)[r].n; i++) {
669 if (ND_node_type(GD_rank(g)[r].v[i]) == nullType)
670 m++;
671 else
672 break;
673 }
674
675 /* now exchange v,w and calculate the same counts */
676
677 exchange(v, w);
678
679 /* get the separator node index */
680 for (i = 0; i < GD_rank(g)[r].n; i++) {
681 if (GD_rank(g)[r].v[i] == s)
682 sepIndex = i;
683 }
684
685 /* count the number of null nodes to the left and
686 * right of the separator node
687 */
688 for (i = sepIndex - 1; i >= 0; i--) {
689 if (ND_node_type(GD_rank(g)[r].v[i]) == nullType)
690 k1++;
691 else
692 break;
693 }
694
695 for (i = sepIndex + 1; i < GD_rank(g)[r].n; i++) {
696 if (ND_node_type(GD_rank(g)[r].v[i]) == nullType)
697 m1++;
698 else
699 break;
700 }
701
702 if (abs(k1 - m1) > abs(k - m)) {
703 exchange(v, w); //revert to the original ordering
704 }
705 }
706
707 static int balance(graph_t * g)
708 {
709 int i, c0, c1, rv;
710 node_t *v, *w;
711 int r;
712
713 rv = 0;
714
715 for (r = GD_maxrank(g); r >= GD_minrank(g); r--) {
716
717 GD_rank(g)[r].candidate = FALSE;
718 for (i = 0; i < GD_rank(g)[r].n - 1; i++) {
719 v = GD_rank(g)[r].v[i];
720 w = GD_rank(g)[r].v[i + 1];
721 assert(ND_order(v) < ND_order(w));
722 if (left2right(g, v, w))
723 continue;
724 c0 = c1 = 0;
725 if (r > 0) {
726 c0 += in_cross(v, w);
727 c1 += in_cross(w, v);
728 }
729
730 if (GD_rank(g)[r + 1].n > 0) {
731 c0 += out_cross(v, w);
732 c1 += out_cross(w, v);
733 }
734 #if 0
735 if ((c1 < c0) || ((c0 > 0) && reverse && (c1 == c0))) {
736 exchange(v, w);
737 rv += (c0 - c1);
738 GD_rank(Root)[r].valid = FALSE;
739 GD_rank(g)[r].candidate = TRUE;
740
741 if (r > GD_minrank(g)) {
742 GD_rank(Root)[r - 1].valid = FALSE;
743 GD_rank(g)[r - 1].candidate = TRUE;
744 }
745 if (r < GD_maxrank(g)) {
746 GD_rank(Root)[r + 1].valid = FALSE;
747 GD_rank(g)[r + 1].candidate = TRUE;
748 }
749 }
750 #endif
751
752 if (c1 <= c0) {
753 balanceNodes(g, r, v, w);
754 }
755 }
756 }
757 return rv;
758 }
759
760 static int transpose_step(graph_t * g, int r, int reverse)
761 {
762 int i, c0, c1, rv;
763 node_t *v, *w;
764
765 rv = 0;
766 GD_rank(g)[r].candidate = FALSE;
767 for (i = 0; i < GD_rank(g)[r].n - 1; i++) {
768 v = GD_rank(g)[r].v[i];
769 w = GD_rank(g)[r].v[i + 1];
770 assert(ND_order(v) < ND_order(w));
771 if (left2right(g, v, w))
772 continue;
773 c0 = c1 = 0;
774 if (r > 0) {
775 c0 += in_cross(v, w);
776 c1 += in_cross(w, v);
777 }
778 if (GD_rank(g)[r + 1].n > 0) {
779 c0 += out_cross(v, w);
780 c1 += out_cross(w, v);
781 }
782 if ((c1 < c0) || ((c0 > 0) && reverse && (c1 == c0))) {
783 exchange(v, w);
784 rv += (c0 - c1);
785 GD_rank(Root)[r].valid = FALSE;
786 GD_rank(g)[r].candidate = TRUE;
787
788 if (r > GD_minrank(g)) {
789 GD_rank(Root)[r - 1].valid = FALSE;
790 GD_rank(g)[r - 1].candidate = TRUE;
791 }
792 if (r < GD_maxrank(g)) {
793 GD_rank(Root)[r + 1].valid = FALSE;
794 GD_rank(g)[r + 1].candidate = TRUE;
795 }
796 }
797 }
798 return rv;
799 }
800
801 static void transpose(graph_t * g, int reverse)
802 {
803 int r, delta;
804
805 for (r = GD_minrank(g); r <= GD_maxrank(g); r++)
806 GD_rank(g)[r].candidate = TRUE;
807 do {
808 delta = 0;
809 #ifdef NOTDEF
810 /* Don't run both the upward and downward passes; they cancel.
811 I tried making it depend on whether the pass was odd or even,
812 but that didn't help. */
813 for (r = GD_maxrank(g); r >= GD_minrank(g); r--)
814 if (GD_rank(g)[r].candidate)
815 delta += transpose_step(g, r, reverse);
816 #endif
817 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
818 if (GD_rank(g)[r].candidate) {
819 delta += transpose_step(g, r, reverse);
820 }
821 }
822 /*} while (delta > ncross(g)*(1.0 - Convergence)); */
823 } while (delta >= 1);
824 }
825
826 static int mincross(graph_t * g, int startpass, int endpass, int doBalance)
827 {
828 int maxthispass, iter, trying, pass;
829 int cur_cross, best_cross;
830
831 if (startpass > 1) {
832 cur_cross = best_cross = ncross(g);
833 save_best(g);
834 } else
835 cur_cross = best_cross = INT_MAX;
836 for (pass = startpass; pass <= endpass; pass++) {
837 if (pass <= 1) {
838 maxthispass = MIN(4, MaxIter);
839 if (g == dot_root(g))
840 build_ranks(g, pass);
841 if (pass == 0)
842 flat_breakcycles(g);
843 flat_reorder(g);
844
845 if ((cur_cross = ncross(g)) <= best_cross) {
846 save_best(g);
847 best_cross = cur_cross;
848 }
849 trying = 0;
850 } else {
851 maxthispass = MaxIter;
852 if (cur_cross > best_cross)
853 restore_best(g);
854 cur_cross = best_cross;
855 }
856 trying = 0;
857 for (iter = 0; iter < maxthispass; iter++) {
858 if (Verbose)
859 fprintf(stderr,
860 "mincross: pass %d iter %d trying %d cur_cross %d best_cross %d\n",
861 pass, iter, trying, cur_cross, best_cross);
862 if (trying++ >= MinQuit)
863 break;
864 if (cur_cross == 0)
865 break;
866 mincross_step(g, iter);
867 if ((cur_cross = ncross(g)) <= best_cross) {
868 save_best(g);
869 if (cur_cross < Convergence * best_cross)
870 trying = 0;
871 best_cross = cur_cross;
872 }
873 }
874 if (cur_cross == 0)
875 break;
876 }
877 if (cur_cross > best_cross)
878 restore_best(g);
879 if (best_cross > 0) {
880 transpose(g, FALSE);
881 best_cross = ncross(g);
882 }
883 if (doBalance) {
884 for (iter = 0; iter < maxthispass; iter++)
885 balance(g);
886 }
887
888 return best_cross;
889 }
890
891 static void restore_best(graph_t * g)
892 {
893 node_t *n;
894 int i, r;
895
896 /* for (n = GD_nlist(g); n; n = ND_next(n)) */
897 /* ND_order(n) = saveorder(n); */
898 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
899 for (i = 0; i < GD_rank(g)[r].n; i++) {
900 n = GD_rank(g)[r].v[i];
901 ND_order(n) = saveorder(n);
902 }
903 }
904 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
905 GD_rank(Root)[r].valid = FALSE;
906 qsort(GD_rank(g)[r].v, GD_rank(g)[r].n, sizeof(GD_rank(g)[0].v[0]),
907 (qsort_cmpf) nodeposcmpf);
908 }
909 }
910
911 static void save_best(graph_t * g)
912 {
913 node_t *n;
914 /* for (n = GD_nlist(g); n; n = ND_next(n)) */
915 /* saveorder(n) = ND_order(n); */
916 int i, r;
917 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
918 for (i = 0; i < GD_rank(g)[r].n; i++) {
919 n = GD_rank(g)[r].v[i];
920 saveorder(n) = ND_order(n);
921 }
922 }
923 }
924
925 /* merges the connected components of g */
926 static void merge_components(graph_t * g)
927 {
928 int c;
929 node_t *u, *v;
930
931 if (GD_comp(g).size <= 1)
932 return;
933 u = NULL;
934 for (c = 0; c < GD_comp(g).size; c++) {
935 v = GD_comp(g).list[c];
936 if (u)
937 ND_next(u) = v;
938 ND_prev(v) = u;
939 while (ND_next(v)) {
940 v = ND_next(v);
941 }
942 u = v;
943 }
944 GD_comp(g).size = 1;
945 GD_nlist(g) = GD_comp(g).list[0];
946 GD_minrank(g) = GlobalMinRank;
947 GD_maxrank(g) = GlobalMaxRank;
948 }
949
950 /* merge connected components, create globally consistent rank lists */
951 static void merge2(graph_t * g)
952 {
953 int i, r;
954 node_t *v;
955
956 /* merge the components and rank limits */
957 merge_components(g);
958
959 /* install complete ranks */
960 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
961 GD_rank(g)[r].n = GD_rank(g)[r].an;
962 GD_rank(g)[r].v = GD_rank(g)[r].av;
963 for (i = 0; i < GD_rank(g)[r].n; i++) {
964 v = GD_rank(g)[r].v[i];
965 if (v == NULL) {
966 if (Verbose)
967 fprintf(stderr,
968 "merge2: graph %s, rank %d has only %d < %d nodes\n",
969 agnameof(g), r, i, GD_rank(g)[r].n);
970 GD_rank(g)[r].n = i;
971 break;
972 }
973 ND_order(v) = i;
974 }
975 }
976 }
977
978 static void cleanup2(graph_t * g, int nc)
979 {
980 int i, j, r, c;
981 node_t *v;
982 edge_t *e;
983
984 if (TI_list) {
985 free(TI_list);
986 TI_list = NULL;
987 }
988 if (TE_list) {
989 free(TE_list);
990 TE_list = NULL;
991 }
992 /* fix vlists of clusters */
993 for (c = 1; c <= GD_n_cluster(g); c++)
994 rec_reset_vlists(GD_clust(g)[c]);
995
996 /* remove temporary edges used for node ordering */
997 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
998 for (i = 0; i < GD_rank(g)[r].n; i++) {
999 v = GD_rank(g)[r].v[i];
1000 ND_order(v) = i;
1001 if (ND_flat_out(v).list) {
1002 for (j = 0; (e = ND_flat_out(v).list[j]); j++)
1003 if (ED_edge_type(e) == FLATORDER) {
1004 delete_flat_edge(e);
1005 free(e->base.data);
1006 free(e);
1007 j--;
1008 }
1009 }
1010 }
1011 free_matrix(GD_rank(g)[r].flat);
1012 }
1013 if (Verbose)
1014 fprintf(stderr, "mincross %s: %d crossings, %.2f secs.\n",
1015 agnameof(g), nc, elapsed_sec());
1016 }
1017
1018 static node_t *neighbor(node_t * v, int dir)
1019 {
1020 node_t *rv;
1021
1022 rv = NULL;
1023 assert(v);
1024 if (dir < 0) {
1025 if (ND_order(v) > 0)
1026 rv = GD_rank(Root)[ND_rank(v)].v[ND_order(v) - 1];
1027 } else
1028 rv = GD_rank(Root)[ND_rank(v)].v[ND_order(v) + 1];
1029 assert((rv == 0) || (ND_order(rv)-ND_order(v))*dir > 0);
1030 return rv;
1031 }
1032
1033 static int is_a_normal_node_of(graph_t * g, node_t * v)
1034 {
1035 return ((ND_node_type(v) == NORMAL) && agcontains(g, v));
1036 }
1037
1038 static int is_a_vnode_of_an_edge_of(graph_t * g, node_t * v)
1039 {
1040 if ((ND_node_type(v) == VIRTUAL)
1041 && (ND_in(v).size == 1) && (ND_out(v).size == 1)) {
1042 edge_t *e = ND_out(v).list[0];
1043 while (ED_edge_type(e) != NORMAL)
1044 e = ED_to_orig(e);
1045 if (agcontains(g, e))
1046 return TRUE;
1047 }
1048 return FALSE;
1049 }
1050
1051 static int inside_cluster(graph_t * g, node_t * v)
1052 {
1053 return (is_a_normal_node_of(g, v) | is_a_vnode_of_an_edge_of(g, v));
1054 }
1055
1056 static node_t *furthestnode(graph_t * g, node_t * v, int dir)
1057 {
1058 node_t *u, *rv;
1059
1060 rv = u = v;
1061 while ((u = neighbor(u, dir))) {
1062 if (is_a_normal_node_of(g, u))
1063 rv = u;
1064 else if (is_a_vnode_of_an_edge_of(g, u))
1065 rv = u;
1066 }
1067 return rv;
1068 }
1069
1070 void save_vlist(graph_t * g)
1071 {
1072 int r;
1073
1074 if (GD_rankleader(g))
1075 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1076 GD_rankleader(g)[r] = GD_rank(g)[r].v[0];
1077 }
1078 }
1079
1080 void rec_save_vlists(graph_t * g)
1081 {
1082 int c;
1083
1084 save_vlist(g);
1085 for (c = 1; c <= GD_n_cluster(g); c++)
1086 rec_save_vlists(GD_clust(g)[c]);
1087 }
1088
1089
1090 void rec_reset_vlists(graph_t * g)
1091 {
1092 int r, c;
1093 node_t *u, *v, *w;
1094
1095 /* fix vlists of sub-clusters */
1096 for (c = 1; c <= GD_n_cluster(g); c++)
1097 rec_reset_vlists(GD_clust(g)[c]);
1098
1099 if (GD_rankleader(g))
1100 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1101 v = GD_rankleader(g)[r];
1102 #ifdef DEBUG
1103 node_in_root_vlist(v);
1104 #endif
1105 u = furthestnode(g, v, -1);
1106 w = furthestnode(g, v, 1);
1107 GD_rankleader(g)[r] = u;
1108 #ifdef DEBUG
1109 assert(GD_rank(dot_root(g))[r].v[ND_order(u)] == u);
1110 #endif
1111 GD_rank(g)[r].v = GD_rank(dot_root(g))[r].v + ND_order(u);
1112 GD_rank(g)[r].n = ND_order(w) - ND_order(u) + 1;
1113 }
1114 }
1115
1116 /* realFillRanks:
1117 * The structures in crossing minimization and positioning require
1118 * that clusters have some node on each rank. This function recursively
1119 * guarantees this property. It takes into account nodes and edges in
1120 * a cluster, the latter causing dummy nodes for intervening ranks.
1121 * For any rank without a node, we create a real node of small size. This
1122 * is stored in the subgraph sg, for easy removal later.
1123 *
1124 * I believe it is not necessary to do this for the root graph, as its
1125 * components are laid out one at a time and each necessarily has a
1126 * node on every rank between its source and sink levels.
1127 */
1128 static Agraph_t*
1129 realFillRanks (Agraph_t* g, int rnks[], int rnks_sz, Agraph_t* sg)
1130 {
1131 int i, c;
1132 Agedge_t* e;
1133 Agnode_t* n;
1134
1135 for (c = 1; c <= GD_n_cluster(g); c++)
1136 sg = realFillRanks (GD_clust(g)[c], rnks, rnks_sz, sg);
1137
1138 if (dot_root(g) == g)
1139 return sg;
1140 memset (rnks, 0, sizeof(int)*rnks_sz);
1141 for (n = agfstnode(g); n; n = agnxtnode(g,n)) {
1142 rnks[ND_rank(n)] = 1;
1143 for (e = agfstout(g,n); e; e = agnxtout(g,e)) {
1144 for (i = ND_rank(n)+1; i <= ND_rank(aghead(e)); i++)
1145 rnks[i] = 1;
1146 }
1147 }
1148 for (i = GD_minrank(g); i <= GD_maxrank(g); i++) {
1149 if (rnks[i] == 0) {
1150 if (!sg) {
1151 sg = agsubg (dot_root(g), "_new_rank", 1);
1152 }
1153 n = agnode (sg, NULL, 1);
1154 agbindrec(n, "Agnodeinfo_t", sizeof(Agnodeinfo_t), TRUE);
1155 ND_rank(n) = i;
1156 ND_lw(n) = ND_rw(n) = 0.5;
1157 ND_ht(n) = 1;
1158 ND_UF_size(n) = 1;
1159 alloc_elist(4, ND_in(n));
1160 alloc_elist(4, ND_out(n));
1161 agsubnode (g, n, 1);
1162 }
1163 }
1164 return sg;
1165 }
1166
1167 static void
1168 fillRanks (Agraph_t* g)
1169 {
1170 Agraph_t* sg;
1171 int rnks_sz = GD_maxrank(g) + 2;
1172 int* rnks = N_NEW(rnks_sz, int);
1173 sg = realFillRanks (g, rnks, rnks_sz, NULL);
1174 free (rnks);
1175 }
1176
1177 static void init_mincross(graph_t * g)
1178 {
1179 int size;
1180
1181 if (Verbose)
1182 start_timer();
1183
1184 ReMincross = FALSE;
1185 Root = g;
1186 /* alloc +1 for the null terminator usage in do_ordering() */
1187 /* also, the +1 avoids attempts to alloc 0 sizes, something
1188 that efence complains about */
1189 size = agnedges(dot_root(g)) + 1;
1190 TE_list = N_NEW(size, edge_t *);
1191 TI_list = N_NEW(size, int);
1192 mincross_options(g);
1193 if (GD_flags(g) & NEW_RANK)
1194 fillRanks (g);
1195 class2(g);
1196 decompose(g, 1);
1197 allocate_ranks(g);
1198 ordered_edges(g);
1199 GlobalMinRank = GD_minrank(g);
1200 GlobalMaxRank = GD_maxrank(g);
1201 }
1202
1203 static void flat_rev(Agraph_t * g, Agedge_t * e)
1204 {
1205 int j;
1206 Agedge_t *rev;
1207
1208 if (!ND_flat_out(aghead(e)).list)
1209 rev = NULL;
1210 else
1211 for (j = 0; (rev = ND_flat_out(aghead(e)).list[j]); j++)
1212 if (aghead(rev) == agtail(e))
1213 break;
1214 if (rev) {
1215 merge_oneway(e, rev);
1216 if ((ED_edge_type(rev) == FLATORDER)
1217 && (ED_to_orig(rev) == 0))
1218 ED_to_orig(rev) = e;
1219 elist_append(e, ND_other(agtail(e)));
1220 } else {
1221 rev = new_virtual_edge(aghead(e), agtail(e), e);
1222 if (ED_edge_type(e) == FLATORDER)
1223 ED_edge_type(rev) = FLATORDER;
1224 else
1225 ED_edge_type(rev) = REVERSED;
1226 ED_label(rev) = ED_label(e);
1227 flat_edge(g, rev);
1228 }
1229 }
1230
1231 static void flat_search(graph_t * g, node_t * v)
1232 {
1233 int i;
1234 boolean hascl;
1235 edge_t *e;
1236 adjmatrix_t *M = GD_rank(g)[ND_rank(v)].flat;
1237
1238 ND_mark(v) = TRUE;
1239 ND_onstack(v) = TRUE;
1240 hascl = (GD_n_cluster(dot_root(g)) > 0);
1241 if (ND_flat_out(v).list)
1242 for (i = 0; (e = ND_flat_out(v).list[i]); i++) {
1243 if (hascl
1244 && NOT(agcontains(g, agtail(e)) && agcontains(g, aghead(e))))
1245 continue;
1246 if (ED_weight(e) == 0)
1247 continue;
1248 if (ND_onstack(aghead(e)) == TRUE) {
1249 assert(flatindex(aghead(e)) < M->nrows);
1250 assert(flatindex(agtail(e)) < M->ncols);
1251 ELT(M, flatindex(aghead(e)), flatindex(agtail(e))) = 1;
1252 delete_flat_edge(e);
1253 i--;
1254 if (ED_edge_type(e) == FLATORDER)
1255 continue;
1256 flat_rev(g, e);
1257 } else {
1258 assert(flatindex(aghead(e)) < M->nrows);
1259 assert(flatindex(agtail(e)) < M->ncols);
1260 ELT(M, flatindex(agtail(e)), flatindex(aghead(e))) = 1;
1261 if (ND_mark(aghead(e)) == FALSE)
1262 flat_search(g, aghead(e));
1263 }
1264 }
1265 ND_onstack(v) = FALSE;
1266 }
1267
1268 static void flat_breakcycles(graph_t * g)
1269 {
1270 int i, r, flat;
1271 node_t *v;
1272
1273 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1274 flat = 0;
1275 for (i = 0; i < GD_rank(g)[r].n; i++) {
1276 v = GD_rank(g)[r].v[i];
1277 ND_mark(v) = ND_onstack(v) = FALSE;
1278 flatindex(v) = i;
1279 if ((ND_flat_out(v).size > 0) && (flat == 0)) {
1280 GD_rank(g)[r].flat =
1281 new_matrix(GD_rank(g)[r].n, GD_rank(g)[r].n);
1282 flat = 1;
1283 }
1284 }
1285 if (flat) {
1286 for (i = 0; i < GD_rank(g)[r].n; i++) {
1287 v = GD_rank(g)[r].v[i];
1288 if (ND_mark(v) == FALSE)
1289 flat_search(g, v);
1290 }
1291 }
1292 }
1293 }
1294
1295 /* allocate_ranks:
1296 * Allocate rank structure, determining number of nodes per rank.
1297 * Note that no nodes are put into the structure yet.
1298 */
1299 void allocate_ranks(graph_t * g)
1300 {
1301 int r, low, high, *cn;
1302 node_t *n;
1303 edge_t *e;
1304
1305 cn = N_NEW(GD_maxrank(g) + 2, int); /* must be 0 based, not GD_minrank */
1306 for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
1307 cn[ND_rank(n)]++;
1308 for (e = agfstout(g, n); e; e = agnxtout(g, e)) {
1309 low = ND_rank(agtail(e));
1310 high = ND_rank(aghead(e));
1311 if (low > high) {
1312 int t = low;
1313 low = high;
1314 high = t;
1315 }
1316 for (r = low + 1; r < high; r++)
1317 cn[r]++;
1318 }
1319 }
1320 GD_rank(g) = N_NEW(GD_maxrank(g) + 2, rank_t);
1321 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1322 GD_rank(g)[r].an = GD_rank(g)[r].n = cn[r];
1323 GD_rank(g)[r].av = GD_rank(g)[r].v = N_NEW(cn[r] + 1, node_t *);
1324 }
1325 free(cn);
1326 }
1327
1328 /* install a node at the current right end of its rank */
1329 void install_in_rank(graph_t * g, node_t * n)
1330 {
1331 int i, r;
1332
1333 r = ND_rank(n);
1334 i = GD_rank(g)[r].n;
1335 if (GD_rank(g)[r].an <= 0) {
1336 agerr(AGERR, "install_in_rank, line %d: %s %s rank %d i = %d an = 0\n",
1337 __LINE__, agnameof(g), agnameof(n), r, i);
1338 return;
1339 }
1340
1341 GD_rank(g)[r].v[i] = n;
1342 ND_order(n) = i;
1343 GD_rank(g)[r].n++;
1344 assert(GD_rank(g)[r].n <= GD_rank(g)[r].an);
1345 #ifdef DEBUG
1346 {
1347 node_t *v;
1348
1349 for (v = GD_nlist(g); v; v = ND_next(v))
1350 if (v == n)
1351 break;
1352 assert(v != NULL);
1353 }
1354 #endif
1355 if (ND_order(n) > GD_rank(Root)[r].an) {
1356 agerr(AGERR, "install_in_rank, line %d: ND_order(%s) [%d] > GD_rank(Root)[%d].an [%d]\n",
1357 __LINE__, agnameof(n), ND_order(n), r, GD_rank(Root)[r].an);
1358 return;
1359 }
1360 if ((r < GD_minrank(g)) || (r > GD_maxrank(g))) {
1361 agerr(AGERR, "install_in_rank, line %d: rank %d not in rank range [%d,%d]\n",
1362 __LINE__, r, GD_minrank(g), GD_maxrank(g));
1363 return;
1364 }
1365 if (GD_rank(g)[r].v + ND_order(n) >
1366 GD_rank(g)[r].av + GD_rank(Root)[r].an) {
1367 agerr(AGERR, "install_in_rank, line %d: GD_rank(g)[%d].v + ND_order(%s) [%d] > GD_rank(g)[%d].av + GD_rank(Root)[%d].an [%d]\n",
1368 __LINE__, r, agnameof(n),ND_order(n), r, r, GD_rank(Root)[r].an);
1369 return;
1370 }
1371 }
1372
1373 /* Install nodes in ranks. The initial ordering ensures that series-parallel
1374 * graphs such as trees are drawn with no crossings. It tries searching
1375 * in- and out-edges and takes the better of the two initial orderings.
1376 */
1377 void build_ranks(graph_t * g, int pass)
1378 {
1379 int i, j;
1380 node_t *n, *n0;
1381 edge_t **otheredges;
1382 nodequeue *q;
1383
1384 q = new_queue(GD_n_nodes(g));
1385 for (n = GD_nlist(g); n; n = ND_next(n))
1386 MARK(n) = FALSE;
1387
1388 #ifdef DEBUG
1389 {
1390 edge_t *e;
1391 for (n = GD_nlist(g); n; n = ND_next(n)) {
1392 for (i = 0; (e = ND_out(n).list[i]); i++)
1393 assert(MARK(aghead(e)) == FALSE);
1394 for (i = 0; (e = ND_in(n).list[i]); i++)
1395 assert(MARK(agtail(e)) == FALSE);
1396 }
1397 }
1398 #endif
1399
1400 for (i = GD_minrank(g); i <= GD_maxrank(g); i++)
1401 GD_rank(g)[i].n = 0;
1402
1403 for (n = GD_nlist(g); n; n = ND_next(n)) {
1404 otheredges = ((pass == 0) ? ND_in(n).list : ND_out(n).list);
1405 if (otheredges[0] != NULL)
1406 continue;
1407 if (MARK(n) == FALSE) {
1408 MARK(n) = TRUE;
1409 enqueue(q, n);
1410 while ((n0 = dequeue(q))) {
1411 if (ND_ranktype(n0) != CLUSTER) {
1412 install_in_rank(g, n0);
1413 enqueue_neighbors(q, n0, pass);
1414 } else {
1415 install_cluster(g, n0, pass, q);
1416 }
1417 }
1418 }
1419 }
1420 if (dequeue(q))
1421 agerr(AGERR, "surprise\n");
1422 for (i = GD_minrank(g); i <= GD_maxrank(g); i++) {
1423 GD_rank(Root)[i].valid = FALSE;
1424 if (GD_flip(g) && (GD_rank(g)[i].n > 0)) {
1425 int n, ndiv2;
1426 node_t **vlist = GD_rank(g)[i].v;
1427 n = GD_rank(g)[i].n - 1;
1428 ndiv2 = n / 2;
1429 for (j = 0; j <= ndiv2; j++)
1430 exchange(vlist[j], vlist[n - j]);
1431 }
1432 }
1433
1434 if ((g == dot_root(g)) && ncross(g) > 0)
1435 transpose(g, FALSE);
1436 free_queue(q);
1437 }
1438
1439 void enqueue_neighbors(nodequeue * q, node_t * n0, int pass)
1440 {
1441 int i;
1442 edge_t *e;
1443
1444 if (pass == 0) {
1445 for (i = 0; i < ND_out(n0).size; i++) {
1446 e = ND_out(n0).list[i];
1447 if ((MARK(aghead(e))) == FALSE) {
1448 MARK(aghead(e)) = TRUE;
1449 enqueue(q, aghead(e));
1450 }
1451 }
1452 } else {
1453 for (i = 0; i < ND_in(n0).size; i++) {
1454 e = ND_in(n0).list[i];
1455 if ((MARK(agtail(e))) == FALSE) {
1456 MARK(agtail(e)) = TRUE;
1457 enqueue(q, agtail(e));
1458 }
1459 }
1460 }
1461 }
1462
1463 static int constraining_flat_edge(Agraph_t *g, Agnode_t *v, Agedge_t *e)
1464 {
1465 if (ED_weight(e) == 0) return FALSE;
1466 if (!inside_cluster(g,agtail(e))) return FALSE;
1467 if (!inside_cluster(g,aghead(e))) return FALSE;
1468 return TRUE;
1469 }
1470
1471
1472 /* Construct the list of nodes reachable from v in post-order.
1473 * This is the same as doing a topological sort in reverse order.
1474 */
1475 static int postorder(graph_t * g, node_t * v, node_t ** list, int r)
1476 {
1477 edge_t *e;
1478 int i, cnt = 0;
1479
1480 MARK(v) = TRUE;
1481 if (ND_flat_out(v).size > 0) {
1482 for (i = 0; (e = ND_flat_out(v).list[i]); i++) {
1483 if (!constraining_flat_edge(g,v,e)) continue;
1484 if (MARK(aghead(e)) == FALSE)
1485 cnt += postorder(g, aghead(e), list + cnt, r);
1486 }
1487 }
1488 assert(ND_rank(v) == r);
1489 list[cnt++] = v;
1490 return cnt;
1491 }
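
/* Illustrative sketch (not compiled): why a reversed post-order is a
 * topological order, on a tiny hard-coded chain a -> b -> c. The names
 * succ, seen and post are hypothetical and exist only for this example.
 */
#if 0
/* a -> b -> c encoded as successor indices; -1 means no successor */
static const int succ[3] = { 1, 2, -1 };
static int seen[3];

static int post(int v, int *out, int cnt)
{
    seen[v] = 1;
    if (succ[v] >= 0 && !seen[succ[v]])
	cnt = post(succ[v], out, cnt);
    out[cnt++] = v;           /* records c, b, a when started at a */
    return cnt;
}
/* Reversing out[] gives a, b, c: a valid topological order, which is how
 * flat_reorder below consumes the list built by postorder() when the rank
 * is not flipped. */
#endif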
1492
1493 static void flat_reorder(graph_t * g)
1494 {
1495 int i, j, r, pos, n_search, local_in_cnt, local_out_cnt, base_order;
1496 node_t *v, **left, **right, *t;
1497 node_t **temprank = NULL;
1498 edge_t *flat_e, *e;
1499
1500 if (GD_has_flat_edges(g) == FALSE)
1501 return;
1502 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1503 if (GD_rank(g)[r].n == 0) continue;
1504 base_order = ND_order(GD_rank(g)[r].v[0]);
1505 for (i = 0; i < GD_rank(g)[r].n; i++)
1506 MARK(GD_rank(g)[r].v[i]) = FALSE;
1507 temprank = ALLOC(i + 1, temprank, node_t *);
1508 pos = 0;
1509
1510 /* construct reverse topological sort order in temprank */
1511 for (i = 0; i < GD_rank(g)[r].n; i++) {
1512 if (GD_flip(g)) v = GD_rank(g)[r].v[i];
1513 else v = GD_rank(g)[r].v[GD_rank(g)[r].n - i - 1];
1514
1515 local_in_cnt = local_out_cnt = 0;
1516 for (j = 0; j < ND_flat_in(v).size; j++) {
1517 flat_e = ND_flat_in(v).list[j];
1518 if (constraining_flat_edge(g,v,flat_e)) local_in_cnt++;
1519 }
1520 for (j = 0; j < ND_flat_out(v).size; j++) {
1521 flat_e = ND_flat_out(v).list[j];
1522 if (constraining_flat_edge(g,v,flat_e)) local_out_cnt++;
1523 }
1524 if ((local_in_cnt == 0) && (local_out_cnt == 0))
1525 temprank[pos++] = v;
1526 else {
1527 if ((MARK(v) == FALSE) && (local_in_cnt == 0)) {
1528 left = temprank + pos;
1529 n_search = postorder(g, v, left, r);
1530 pos += n_search;
1531 }
1532 }
1533 }
1534
1535 if (pos) {
1536 if (GD_flip(g) == FALSE) {
1537 left = temprank;
1538 right = temprank + pos - 1;
1539 while (left < right) {
1540 t = *left;
1541 *left = *right;
1542 *right = t;
1543 left++;
1544 right--;
1545 }
1546 }
1547 for (i = 0; i < GD_rank(g)[r].n; i++) {
1548 v = GD_rank(g)[r].v[i] = temprank[i];
1549 ND_order(v) = i + base_order;
1550 }
1551
1552 /* non-constraining flat edges must be made to go left-to-right */
1553 for (i = 0; i < GD_rank(g)[r].n; i++) {
1554 v = GD_rank(g)[r].v[i];
1555 if (ND_flat_out(v).list) {
1556 for (j = 0; (e = ND_flat_out(v).list[j]); j++) {
1557 if ( ((GD_flip(g) == FALSE) && (ND_order(aghead(e)) < ND_order(agtail(e)))) ||
1558 ( (GD_flip(g)) && (ND_order(aghead(e)) > ND_order(agtail(e)) ))) {
1559 assert(constraining_flat_edge(g,v,e) == FALSE);
1560 delete_flat_edge(e);
1561 j--;
1562 flat_rev(g, e);
1563 }
1564 }
1565 }
1566 }
1567 /* postprocess to restore intended order */
1568 }
1569 /* else do no harm! */
1570 GD_rank(Root)[r].valid = FALSE;
1571 }
1572 if (temprank)
1573 free(temprank);
1574 }
1575
1576 static void reorder(graph_t * g, int r, int reverse, int hasfixed)
1577 {
1578 int changed = 0, nelt;
1579 boolean muststay, sawclust;
1580 node_t **vlist = GD_rank(g)[r].v;
1581 node_t **lp, **rp, **ep = vlist + GD_rank(g)[r].n;
1582
1583 for (nelt = GD_rank(g)[r].n - 1; nelt >= 0; nelt--) {
1584 lp = vlist;
1585 while (lp < ep) {
1586 /* find leftmost node that can be compared */
1587 while ((lp < ep) && (ND_mval(*lp) < 0))
1588 lp++;
1589 if (lp >= ep)
1590 break;
1591 /* find the node that can be compared */
1592 sawclust = muststay = FALSE;
1593 for (rp = lp + 1; rp < ep; rp++) {
1594 if (sawclust && ND_clust(*rp))
1595 continue; /* ### */
1596 if (left2right(g, *lp, *rp)) {
1597 muststay = TRUE;
1598 break;
1599 }
1600 if (ND_mval(*rp) >= 0)
1601 break;
1602 if (ND_clust(*rp))
1603 sawclust = TRUE; /* ### */
1604 }
1605 if (rp >= ep)
1606 break;
1607 if (muststay == FALSE) {
1608 register int p1 = (ND_mval(*lp));
1609 register int p2 = (ND_mval(*rp));
1610 if ((p1 > p2) || ((p1 == p2) && (reverse))) {
1611 exchange(*lp, *rp);
1612 changed++;
1613 }
1614 }
1615 lp = rp;
1616 }
1617 if ((hasfixed == FALSE) && (reverse == FALSE))
1618 ep--;
1619 }
1620
1621 if (changed) {
1622 GD_rank(Root)[r].valid = FALSE;
1623 if (r > 0)
1624 GD_rank(Root)[r - 1].valid = FALSE;
1625 }
1626 }
1627
1628 static void mincross_step(graph_t * g, int pass)
1629 {
1630 int r, other, first, last, dir;
1631 int hasfixed, reverse;
1632
1633 if ((pass % 4) < 2)
1634 reverse = TRUE;
1635 else
1636 reverse = FALSE;
1637 if (pass % 2) {
1638 r = GD_maxrank(g) - 1;
1639 dir = -1;
1640 } /* up pass */
1641 else {
1642 r = 1;
1643 dir = 1;
1644 } /* down pass */
1645
1646 if (pass % 2 == 0) { /* down pass */
1647 first = GD_minrank(g) + 1;
1648 if (GD_minrank(g) > GD_minrank(Root))
1649 first--;
1650 last = GD_maxrank(g);
1651 dir = 1;
1652 } else { /* up pass */
1653 first = GD_maxrank(g) - 1;
1654 last = GD_minrank(g);
1655 if (GD_maxrank(g) < GD_maxrank(Root))
1656 first++;
1657 dir = -1;
1658 }
1659
1660 for (r = first; r != last + dir; r += dir) {
1661 other = r - dir;
1662 hasfixed = medians(g, r, other);
1663 reorder(g, r, reverse, hasfixed);
1664 }
1665 transpose(g, NOT(reverse));
1666 }
1667
1668 static int local_cross(elist l, int dir)
1669 {
1670 int i, j, is_out;
1671 int cross = 0;
1672 edge_t *e, *f;
1673 if (dir > 0)
1674 is_out = TRUE;
1675 else
1676 is_out = FALSE;
1677 for (i = 0; (e = l.list[i]); i++) {
1678 if (is_out)
1679 for (j = i + 1; (f = l.list[j]); j++) {
1680 if ((ND_order(aghead(f)) - ND_order(aghead(e)))
1681 * (ED_tail_port(f).p.x - ED_tail_port(e).p.x) < 0)
1682 cross += ED_xpenalty(e) * ED_xpenalty(f);
1683 } else
1684 for (j = i + 1; (f = l.list[j]); j++) {
1685 if ((ND_order(agtail(f)) - ND_order(agtail(e)))
1686 * (ED_head_port(f).p.x - ED_head_port(e).p.x) < 0)
1687 cross += ED_xpenalty(e) * ED_xpenalty(f);
1688 }
1689 }
1690 return cross;
1691 }
1692
1693 static int rcross(graph_t * g, int r)
1694 {
1695 static int *Count, C;
1696 int top, bot, cross, max, i, k;
1697 node_t **rtop, *v;
1698
1699 cross = 0;
1700 max = 0;
1701 rtop = GD_rank(g)[r].v;
1702
1703 if (C <= GD_rank(Root)[r + 1].n) {
1704 C = GD_rank(Root)[r + 1].n + 1;
1705 Count = ALLOC(C, Count, int);
1706 }
1707
1708 for (i = 0; i < GD_rank(g)[r + 1].n; i++)
1709 Count[i] = 0;
1710
1711 for (top = 0; top < GD_rank(g)[r].n; top++) {
1712 register edge_t *e;
1713 if (max > 0) {
1714 for (i = 0; (e = ND_out(rtop[top]).list[i]); i++) {
1715 for (k = ND_order(aghead(e)) + 1; k <= max; k++)
1716 cross += Count[k] * ED_xpenalty(e);
1717 }
1718 }
1719 for (i = 0; (e = ND_out(rtop[top]).list[i]); i++) {
1720 register int inv = ND_order(aghead(e));
1721 if (inv > max)
1722 max = inv;
1723 Count[inv] += ED_xpenalty(e);
1724 }
1725 }
1726 for (top = 0; top < GD_rank(g)[r].n; top++) {
1727 v = GD_rank(g)[r].v[top];
1728 if (ND_has_port(v))
1729 cross += local_cross(ND_out(v), 1);
1730 }
1731 for (bot = 0; bot < GD_rank(g)[r + 1].n; bot++) {
1732 v = GD_rank(g)[r + 1].v[bot];
1733 if (ND_has_port(v))
1734 cross += local_cross(ND_in(v), -1);
1735 }
1736 return cross;
1737 }
1738
1739 int ncross(graph_t * g)
1740 {
1741 int r, count, nc;
1742
1743 g = Root;
1744 count = 0;
1745 for (r = GD_minrank(g); r < GD_maxrank(g); r++) {
1746 if (GD_rank(g)[r].valid)
1747 count += GD_rank(g)[r].cache_nc;
1748 else {
1749 nc = GD_rank(g)[r].cache_nc = rcross(g, r);
1750 count += nc;
1751 GD_rank(g)[r].valid = TRUE;
1752 }
1753 }
1754 return count;
1755 }
1756
1757 static int ordercmpf(int *i0, int *i1)
1758 {
1759 return (*i0) - (*i1);
1760 }
1761
1762 /* flat_mval:
1763 * Calculate a mval for nodes with no in or out non-flat edges.
1764 * Assume (ND_out(n).size == 0) && (ND_in(n).size == 0)
1765 * Find flat edge a->n where a has the largest order and set
1766 * n.mval = a.mval+1, assuming a.mval is defined (>=0).
1767 * If there are no flat in edges, find flat edge n->a where a
1768 * has the smallest order and set n.mval = a.mval-1, assuming
1769 * a.mval is > 0.
1770 * Return true if n.mval is left -1, indicating a fixed node for sorting.
1771 */
1772 static int flat_mval(node_t * n)
1773 {
1774 int i;
1775 edge_t *e, **fl;
1776 node_t *nn;
1777
1778 if (ND_flat_in(n).size > 0) {
1779 fl = ND_flat_in(n).list;
1780 nn = agtail(fl[0]);
1781 for (i = 1; (e = fl[i]); i++)
1782 if (ND_order(agtail(e)) > ND_order(nn))
1783 nn = agtail(e);
1784 if (ND_mval(nn) >= 0) {
1785 ND_mval(n) = ND_mval(nn) + 1;
1786 return FALSE;
1787 }
1788 } else if (ND_flat_out(n).size > 0) {
1789 fl = ND_flat_out(n).list;
1790 nn = aghead(fl[0]);
1791 for (i = 1; (e = fl[i]); i++)
1792 if (ND_order(aghead(e)) < ND_order(nn))
1793 nn = aghead(e);
1794 if (ND_mval(nn) > 0) {
1795 ND_mval(n) = ND_mval(nn) - 1;
1796 return FALSE;
1797 }
1798 }
1799 return TRUE;
1800 }
1801
1802 #define VAL(node,port) (MC_SCALE * ND_order(node) + (port).order)
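
/* Illustrative note: VAL packs a node's position in its rank together with a
 * port's order into one sortable key, so edges attached to different ports
 * of the same node get distinct, correctly ordered median values. For
 * example, assuming hypothetically MC_SCALE == 256 (its actual value is
 * defined elsewhere):
 *
 *     node at order 3, port order 0  ->  key 3*256 + 0 = 768
 *     node at order 3, port order 2  ->  key 3*256 + 2 = 770
 *     node at order 4, port order 0  ->  key 4*256 + 0 = 1024
 */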
1803
1804 static boolean medians(graph_t * g, int r0, int r1)
1805 {
1806 int i, j, j0, lm, rm, lspan, rspan, *list;
1807 node_t *n, **v;
1808 edge_t *e;
1809 boolean hasfixed = FALSE;
1810
1811 list = TI_list;
1812 v = GD_rank(g)[r0].v;
1813 for (i = 0; i < GD_rank(g)[r0].n; i++) {
1814 n = v[i];
1815 j = 0;
1816 if (r1 > r0)
1817 for (j0 = 0; (e = ND_out(n).list[j0]); j0++) {
1818 if (ED_xpenalty(e) > 0)
1819 list[j++] = VAL(aghead(e), ED_head_port(e));
1820 } else
1821 for (j0 = 0; (e = ND_in(n).list[j0]); j0++) {
1822 if (ED_xpenalty(e) > 0)
1823 list[j++] = VAL(agtail(e), ED_tail_port(e));
1824 }
1825 switch (j) {
1826 case 0:
1827 ND_mval(n) = -1;
1828 break;
1829 case 1:
1830 ND_mval(n) = list[0];
1831 break;
1832 case 2:
1833 ND_mval(n) = (list[0] + list[1]) / 2;
1834 break;
1835 default:
1836 qsort(list, j, sizeof(int), (qsort_cmpf) ordercmpf);
1837 if (j % 2)
1838 ND_mval(n) = list[j / 2];
1839 else {
1840 /* weighted median */
1841 rm = j / 2;
1842 lm = rm - 1;
1843 rspan = list[j - 1] - list[rm];
1844 lspan = list[lm] - list[0];
1845 if (lspan == rspan)
1846 ND_mval(n) = (list[lm] + list[rm]) / 2;
1847 else {
1848 double w = list[lm] * (double)rspan + list[rm] * (double)lspan;
1849 ND_mval(n) = w / (lspan + rspan);
1850 }
1851 }
1852 }
1853 }
1854 for (i = 0; i < GD_rank(g)[r0].n; i++) {
1855 n = v[i];
1856 if ((ND_out(n).size == 0) && (ND_in(n).size == 0))
1857 hasfixed |= flat_mval(n);
1858 }
1859 return hasfixed;
1860 }
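
/* Illustrative sketch (not compiled): the even-count branch above computes a
 * weighted median by interpolating between the two middle values, weighting
 * each by the spread on the opposite side. Restated on a plain sorted array;
 * the helper name weighted_median is hypothetical.
 */
#if 0
static int weighted_median(const int *list, int j)   /* j even, j >= 4 */
{
    int rm = j / 2, lm = rm - 1;
    int rspan = list[j - 1] - list[rm];
    int lspan = list[lm] - list[0];
    if (lspan == rspan)
	return (list[lm] + list[rm]) / 2;
    return (int)((list[lm] * (double)rspan + list[rm] * (double)lspan)
		 / (lspan + rspan));
}
#endif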
1861
1862 static int nodeposcmpf(node_t ** n0, node_t ** n1)
1863 {
1864 return (ND_order(*n0) - ND_order(*n1));
1865 }
1866
1867 static int edgeidcmpf(edge_t ** e0, edge_t ** e1)
1868 {
1869 return (AGSEQ(*e0) - AGSEQ(*e1));
1870 }
1871
1872 /* following code deals with weights of edges of "virtual" nodes */
1873 #define ORDINARY 0
1874 #define SINGLETON 1
1875 #define VIRTUALNODE 2
1876 #define NTYPES 3
1877
1878 #define C_EE 1
1879 #define C_VS 2
1880 #define C_SS 2
1881 #define C_VV 4
1882
1883 static int table[NTYPES][NTYPES] = {
1884 /* ordinary */ {C_EE, C_EE, C_EE},
1885 /* singleton */ {C_EE, C_SS, C_VS},
1886 /* virtual */ {C_EE, C_VS, C_VV}
1887 };
1888
1889 static int endpoint_class(node_t * n)
1890 {
1891 if (ND_node_type(n) == VIRTUAL)
1892 return VIRTUALNODE;
1893 if (ND_weight_class(n) <= 1)
1894 return SINGLETON;
1895 return ORDINARY;
1896 }
1897
1898 void virtual_weight(edge_t * e)
1899 {
1900 int t;
1901 t = table[endpoint_class(agtail(e))][endpoint_class(aghead(e))];
1902 ED_weight(e) *= t;
1903 }
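
/* Illustrative note: with the table above, an edge joining two virtual
 * (chain) nodes has its weight multiplied by C_VV = 4, an edge joining a
 * virtual node or a singleton to another singleton by 2, and any edge
 * touching an ordinary node keeps its weight (C_EE = 1). The boosted
 * weights are read by later passes (e.g. coordinate assignment), which
 * therefore tend to keep long chains of virtual nodes straight.
 */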
1904
1905 #ifdef DEBUG
1906 void check_rs(graph_t * g, int null_ok)
1907 {
1908 int i, r;
1909 node_t *v, *prev;
1910
1911 fprintf(stderr, "\n\n%s:\n", agnameof(g));
1912 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1913 fprintf(stderr, "%d: ", r);
1914 prev = NULL;
1915 for (i = 0; i < GD_rank(g)[r].n; i++) {
1916 v = GD_rank(g)[r].v[i];
1917 if (v == NULL) {
1918 fprintf(stderr, "NULL\t");
1919 if (null_ok == FALSE)
1920 abort();
1921 } else {
1922 fprintf(stderr, "%s(%f)\t", agnameof(v), ND_mval(v));
1923 assert(ND_rank(v) == r);
1924 assert(v != prev);
1925 prev = v;
1926 }
1927 }
1928 fprintf(stderr, "\n");
1929 }
1930 }
1931
1932 void check_order(void)
1933 {
1934 int i, r;
1935 node_t *v;
1936 graph_t *g = Root;
1937
1938 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1939 assert(GD_rank(g)[r].v[GD_rank(g)[r].n] == NULL);
1940 for (i = 0; (v = GD_rank(g)[r].v[i]); i++) {
1941 assert(ND_rank(v) == r);
1942 assert(ND_order(v) == i);
1943 }
1944 }
1945 }
1946 #endif
1947
1948 static void mincross_options(graph_t * g)
1949 {
1950 char *p;
1951 double f;
1952
1953 /* set default values */
1954 MinQuit = 8;
1955 MaxIter = 24;
1956 Convergence = .995;
1957
1958 p = agget(g, "mclimit");
1959 if (p && ((f = atof(p)) > 0.0)) {
1960 MinQuit = MAX(1, MinQuit * f);
1961 MaxIter = MAX(1, MaxIter * f);
1962 }
1963 }
1964
1965 #ifdef DEBUG
1966 void check_exchange(node_t * v, node_t * w)
1967 {
1968 int i, r;
1969 node_t *u;
1970
1971 if ((ND_clust(v) == NULL) && (ND_clust(w) == NULL))
1972 return;
1973 assert((ND_clust(v) == NULL) || (ND_clust(w) == NULL));
1974 assert(ND_rank(v) == ND_rank(w));
1975 assert(ND_order(v) < ND_order(w));
1976 r = ND_rank(v);
1977
1978 for (i = ND_order(v) + 1; i < ND_order(w); i++) {
1979 u = GD_rank(dot_root(v))[r].v[i];
1980 if (ND_clust(u))
1981 abort();
1982 }
1983 }
1984
1985 void check_vlists(graph_t * g)
1986 {
1987 int c, i, j, r;
1988 node_t *u;
1989
1990 for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
1991 for (i = 0; i < GD_rank(g)[r].n; i++) {
1992 u = GD_rank(g)[r].v[i];
1993 j = ND_order(u);
1994 assert(GD_rank(Root)[r].v[j] == u);
1995 }
1996 if (GD_rankleader(g)) {
1997 u = GD_rankleader(g)[r];
1998 j = ND_order(u);
1999 assert(GD_rank(Root)[r].v[j] == u);
2000 }
2001 }
2002 for (c = 1; c <= GD_n_cluster(g); c++)
2003 check_vlists(GD_clust(g)[c]);
2004 }
2005
2006 void node_in_root_vlist(node_t * n)
2007 {
2008 node_t **vptr;
2009
2010 for (vptr = GD_rank(Root)[ND_rank(n)].v; *vptr; vptr++)
2011 if (*vptr == n)
2012 break;
2013 if (*vptr == 0)
2014 abort();
2015 }
2016 #endif /* DEBUG code */
2017