mincross.c

/*************************************************************************
 * Copyright (c) 2011 AT&T Intellectual Property
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * https://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors: Details at https://graphviz.org
 *************************************************************************/
/*
 * dot_mincross(g) takes a ranked graph and finds an ordering
 * that avoids edge crossings. Clusters are expanded.
 * N.B. the rank structure is global (not allocated per cluster)
 * because mincross may compare nodes in different clusters.
 */
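/* Illustrative sketch (not part of the build): at its core, mincross()
 * below follows the classic iteration of median-based reordering plus
 * greedy transposition, keeping the best ordering seen so far:
 *
 *     best = order = init_order(g);          // hypothetical names
 *     for (pass = 0; pass < max_passes; pass++) {
 *         wmedian(order, pass);              // per-rank median reordering
 *         transpose(order);                  // greedy adjacent swaps
 *         if (ncross(order) < ncross(best))
 *             best = order;
 *     }
 *
 * The real code adds extra passes and bookkeeping for components, clusters
 * and flat (same-rank) edges.
 */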
#include <assert.h>
#include <cgraph/cgraph.h>
#include <cgraph/gv_math.h>
#include <cgraph/list.h>
#include <dotgen/dot.h>
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <util/agxbuf.h>
#include <util/alloc.h>
#include <util/exit.h>
#include <util/streq.h>

struct adjmatrix_t {
    size_t nrows;
    size_t ncols;
    char *data;
};

/* #define DEBUG */
#define MARK(v) (ND_mark(v))
#define saveorder(v) (ND_coord(v)).x
#define flatindex(v) ((size_t)ND_low(v))

/* forward declarations */
static bool medians(graph_t * g, int r0, int r1);
static int nodeposcmpf(const void *, const void *);
static int edgeidcmpf(const void *, const void *);
static void flat_breakcycles(graph_t * g);
static void flat_reorder(graph_t * g);
static void flat_search(graph_t * g, node_t * v);
static void init_mincross(graph_t * g);
static void merge2(graph_t * g);
static void init_mccomp(graph_t *g, size_t c);
static void cleanup2(graph_t * g, int nc);
static int mincross_clust(graph_t *g, ints_t *scratch);
static int mincross(graph_t *g, int startpass, ints_t *scratch);
static void mincross_step(graph_t * g, int pass);
static void mincross_options(graph_t * g);
static void save_best(graph_t * g);
static void restore_best(graph_t * g);
static adjmatrix_t *new_matrix(size_t i, size_t j);
static void free_matrix(adjmatrix_t * p);
static int ordercmpf(const void *, const void *);
static int ncross(ints_t *scratch);

#ifdef DEBUG
#if DEBUG > 1
static int gd_minrank(Agraph_t *g) {return GD_minrank(g);}
static int gd_maxrank(Agraph_t *g) {return GD_maxrank(g);}
static rank_t *gd_rank(Agraph_t *g, int r) {return &GD_rank(g)[r];}
static int nd_order(Agnode_t *v) { return ND_order(v); }
#endif
void check_rs(graph_t * g, int null_ok);
void check_order(void);
void check_vlists(graph_t * g);
void node_in_root_vlist(node_t * n);
#endif

/* mincross parameters */
static int MinQuit;
static const double Convergence = .995;

static graph_t *Root;
static int GlobalMinRank, GlobalMaxRank;
static edge_t **TE_list;
static int *TI_list;
static bool ReMincross;

#if defined(DEBUG) && DEBUG > 1
static void indent(graph_t* g)
{
    if (g->parent) {
        fprintf (stderr, " ");
        indent(g->parent);
    }
}

static char* nname(node_t* v)
{
    static char buf[1000];
    if (ND_node_type(v)) {
        if (ND_ranktype(v) == CLUSTER)
            snprintf(buf, sizeof(buf), "v%s_%p", agnameof(ND_clust(v)), v);
        else
            snprintf(buf, sizeof(buf), "v_%p", v);
    } else
        snprintf(buf, sizeof(buf), "%s", agnameof(v));
    return buf;
}

static void dumpg (graph_t* g)
{
    int j, i, r;
    node_t* v;
    edge_t* e;

    fprintf (stderr, "digraph A {\n");
    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        fprintf (stderr, " subgraph {rank=same ");
        for (i = 0; i < GD_rank(g)[r].n; i++) {
            v = GD_rank(g)[r].v[i];
            if (i > 0)
                fprintf (stderr, " -> %s", nname(v));
            else
                fprintf (stderr, "%s", nname(v));
        }
        if (i > 1) fprintf (stderr, " [style=invis]}\n");
        else fprintf (stderr, " }\n");
    }
    for (r = GD_minrank(g); r < GD_maxrank(g); r++) {
        for (i = 0; i < GD_rank(g)[r].n; i++) {
            v = GD_rank(g)[r].v[i];
            for (j = 0; (e = ND_out(v).list[j]); j++) {
                fprintf (stderr, "%s -> ", nname(v));
                fprintf (stderr, "%s\n", nname(aghead(e)));
            }
        }
    }
    fprintf (stderr, "}\n");
}

static void dumpr (graph_t* g, int edges)
{
    int j, i, r;
    node_t* v;
    edge_t* e;

    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        fprintf (stderr, "[%d] ", r);
        for (i = 0; i < GD_rank(g)[r].n; i++) {
            v = GD_rank(g)[r].v[i];
            fprintf (stderr, "%s(%.02f,%d) ", nname(v), saveorder(v), ND_order(v));
        }
        fprintf (stderr, "\n");
    }
    if (edges == 0) return;
    for (r = GD_minrank(g); r < GD_maxrank(g); r++) {
        for (i = 0; i < GD_rank(g)[r].n; i++) {
            v = GD_rank(g)[r].v[i];
            for (j = 0; (e = ND_out(v).list[j]); j++) {
                fprintf (stderr, "%s -> ", nname(v));
                fprintf (stderr, "%s\n", nname(aghead(e)));
            }
        }
    }
}
#endif

typedef struct {
    Agrec_t h;
    int x, lo, hi;
    Agnode_t* np;
} info_t;

#define ND_x(n) (((info_t*)AGDATA(n))->x)
#define ND_lo(n) (((info_t*)AGDATA(n))->lo)
#define ND_hi(n) (((info_t*)AGDATA(n))->hi)
#define ND_np(n) (((info_t*)AGDATA(n))->np)
#define ND_idx(n) (ND_order(ND_np(n)))

static void
emptyComp (graph_t* sg)
{
    Agnode_t* n;
    Agnode_t* nxt;

    for (n = agfstnode(sg); n; n = nxt) {
        nxt = agnxtnode (sg, n);
        agdelnode(sg, n);
    }
}

#define isBackedge(e) (ND_idx(aghead(e)) > ND_idx(agtail(e)))

static Agnode_t*
findSource (Agraph_t* g, Agraph_t* sg)
{
    Agnode_t* n;

    for (n = agfstnode(sg); n; n = agnxtnode(sg, n))
        if (agdegree(g, n, 1, 0) == 0) return n;
    return NULL;
}

static int
topsort (Agraph_t* g, Agraph_t* sg, Agnode_t** arr)
{
    Agnode_t* n;
    Agedge_t* e;
    Agedge_t* nxte;
    int cnt = 0;

    while ((n = findSource(g, sg))) {
        arr[cnt++] = ND_np(n);
        agdelnode(sg, n);
        for (e = agfstout(g, n); e; e = nxte) {
            nxte = agnxtout(g, e);
            agdeledge(g, e);
        }
    }
    return cnt;
}

static int
getComp (graph_t* g, node_t* n, graph_t* comp, int* indices)
{
    int backedge = 0;
    Agedge_t* e;

    ND_x(n) = 1;
    indices[agnnodes(comp)] = ND_idx(n);
    agsubnode(comp, n, 1);
    for (e = agfstout(g, n); e; e = agnxtout(g, e)) {
        if (isBackedge(e)) backedge++;
        if (!ND_x(aghead(e)))
            backedge += getComp(g, aghead(e), comp, indices);
    }
    for (e = agfstin(g, n); e; e = agnxtin(g, e)) {
        if (isBackedge(e)) backedge++;
        if (!ND_x(agtail(e)))
            backedge += getComp(g, agtail(e), comp, indices);
    }
    return backedge;
}
/* fixLabelOrder:
 * For each pair of label nodes whose [lo,hi] spans do not overlap, add an
 * edge from the lower span to the higher one. If any such edge runs against
 * the current left-to-right order (a "backedge"), topologically re-sort each
 * affected component and reassign the nodes' positions in the rank.
 */
static void
fixLabelOrder (graph_t* g, rank_t* rk)
{
    int cnt;
    bool haveBackedge = false;
    Agraph_t* sg;
    Agnode_t* n;
    Agnode_t* nxtp;
    Agnode_t* v;

    for (n = agfstnode(g); n; n = nxtp) {
        v = nxtp = agnxtnode(g, n);
        for (; v; v = agnxtnode(g, v)) {
            if (ND_hi(v) <= ND_lo(n)) {
                haveBackedge = true;
                agedge(g, v, n, NULL, 1);
            }
            else if (ND_hi(n) <= ND_lo(v)) {
                agedge(g, n, v, NULL, 1);
            }
        }
    }
    if (!haveBackedge) return;

    sg = agsubg(g, "comp", 1);
    Agnode_t **arr = gv_calloc(agnnodes(g), sizeof(Agnode_t*));
    int *indices = gv_calloc(agnnodes(g), sizeof(int));

    for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
        if (ND_x(n) || agdegree(g, n, 1, 1) == 0) continue;
        if (getComp(g, n, sg, indices)) {
            int i, sz = agnnodes(sg);
            cnt = topsort (g, sg, arr);
            assert (cnt == sz);
            qsort(indices, cnt, sizeof(int), ordercmpf);
            for (i = 0; i < sz; i++) {
                ND_order(arr[i]) = indices[i];
                rk->v[indices[i]] = arr[i];
            }
        }
        emptyComp(sg);
    }
    free(indices);
    free (arr);
}

/* checkLabelOrder:
 * Check that the ordering of labels for flat edges is consistent.
 * This is necessary because dot_position will attempt to force the label
 * to be between the edge's vertices. This can lead to an infeasible problem.
 *
 * We check each rank for any flat edge labels (as dummy nodes) and create a
 * graph with a node for each label. If the graph contains more than 1 node, we
 * call fixLabelOrder to see if there really is a problem and, if so, fix it.
 */
void
checkLabelOrder (graph_t* g)
{
    int j, r, lo, hi;
    graph_t* lg = NULL;
    agxbuf buf = {0};
    rank_t* rk;
    Agnode_t* u;
    Agnode_t* n;
    Agedge_t* e;

    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        rk = GD_rank(g) + r;
        for (j = 0; j < rk->n; j++) {
            u = rk->v[j];
            if ((e = ND_alg(u))) {
                if (!lg) lg = agopen ("lg", Agstrictdirected, 0);
                agxbprint(&buf, "%d", j);
                n = agnode(lg, agxbuse(&buf), 1);
                agbindrec(n, "info", sizeof(info_t), true);
                lo = ND_order(aghead(ND_out(u).list[0]));
                hi = ND_order(aghead(ND_out(u).list[1]));
                if (lo > hi) {
                    int tmp;
                    tmp = lo;
                    lo = hi;
                    hi = tmp;
                }
                ND_lo(n) = lo;
                ND_hi(n) = hi;
                ND_np(n) = u;
            }
        }
        if (lg) {
            if (agnnodes(lg) > 1) fixLabelOrder (lg, rk);
            agclose(lg);
            lg = NULL;
        }
    }
    agxbfree(&buf);
}

/* dot_mincross:
 * Minimize edge crossings
 * Note that nodes are not placed into GD_rank(g) until mincross()
 * is called.
 */
void dot_mincross(graph_t *g) {
    int nc;
    char *s;

    /* check whether malformed input has led to empty cluster that the crossing
     * functions will not anticipate
     */
    {
        size_t i;
        for (i = 1; i <= (size_t)GD_n_cluster(g); ) {
            if (agfstnode(GD_clust(g)[i]) == NULL) {
                agwarningf("removing empty cluster\n");
                memmove(&GD_clust(g)[i], &GD_clust(g)[i + 1],
                        ((size_t)GD_n_cluster(g) - i) * sizeof(GD_clust(g)[0]));
                --GD_n_cluster(g);
            } else {
                ++i;
            }
        }
    }

    init_mincross(g);

    ints_t scratch = {0};

    size_t comp;
    for (nc = 0, comp = 0; comp < GD_comp(g).size; comp++) {
        init_mccomp(g, comp);
        nc += mincross(g, 0, &scratch);
    }

    merge2(g);

    /* run mincross on contents of each cluster */
    for (int c = 1; c <= GD_n_cluster(g); c++) {
        nc += mincross_clust(GD_clust(g)[c], &scratch);
#ifdef DEBUG
        check_vlists(GD_clust(g)[c]);
        check_order();
#endif
    }

    if (GD_n_cluster(g) > 0 && (!(s = agget(g, "remincross")) || mapbool(s))) {
        mark_lowclusters(g);
        ReMincross = true;
        nc = mincross(g, 2, &scratch);
#ifdef DEBUG
        for (int c = 1; c <= GD_n_cluster(g); c++)
            check_vlists(GD_clust(g)[c]);
#endif
    }
    ints_free(&scratch);
    cleanup2(g, nc);
}

static adjmatrix_t *new_matrix(size_t i, size_t j) {
    adjmatrix_t *rv = gv_alloc(sizeof(adjmatrix_t));
    rv->nrows = i;
    rv->ncols = j;
    rv->data = gv_calloc(i * j, sizeof(char));
    return rv;
}

static void free_matrix(adjmatrix_t * p)
{
    if (p) {
        free(p->data);
        free(p);
    }
}

#define ELT(M,i,j) (M->data[((i)*M->ncols)+(j)])

static void init_mccomp(graph_t *g, size_t c) {
    int r;

    GD_nlist(g) = GD_comp(g).list[c];
    if (c > 0) {
        for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
            GD_rank(g)[r].v = GD_rank(g)[r].v + GD_rank(g)[r].n;
            GD_rank(g)[r].n = 0;
        }
    }
}

static int betweenclust(edge_t * e)
{
    while (ED_to_orig(e))
        e = ED_to_orig(e);
    return (ND_clust(agtail(e)) != ND_clust(aghead(e)));
}

static void do_ordering_node(graph_t *g, node_t *n, bool outflag) {
    int i, ne;
    node_t *u, *v;
    edge_t *e, *f, *fe;
    edge_t **sortlist = TE_list;

    if (ND_clust(n))
        return;
    if (outflag) {
        for (i = ne = 0; (e = ND_out(n).list[i]); i++)
            if (!betweenclust(e))
                sortlist[ne++] = e;
    } else {
        for (i = ne = 0; (e = ND_in(n).list[i]); i++)
            if (!betweenclust(e))
                sortlist[ne++] = e;
    }
    if (ne <= 1)
        return;
    /* write null terminator at end of list.
       requires +1 in TE_list allocation */
    sortlist[ne] = 0;
    qsort(sortlist, ne, sizeof(sortlist[0]), edgeidcmpf);
    for (ne = 1; (f = sortlist[ne]); ne++) {
        e = sortlist[ne - 1];
        if (outflag) {
            u = aghead(e);
            v = aghead(f);
        } else {
            u = agtail(e);
            v = agtail(f);
        }
        if (find_flat_edge(u, v))
            return;
        fe = new_virtual_edge(u, v, NULL);
        ED_edge_type(fe) = FLATORDER;
        flat_edge(g, fe);
    }
}

static void do_ordering(graph_t *g, bool outflag) {
    /* Order all nodes in graph */
    node_t *n;

    for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
        do_ordering_node (g, n, outflag);
    }
}

static void do_ordering_for_nodes(graph_t * g)
{
    /* Order nodes which have the "ordered" attribute */
    node_t *n;
    const char *ordering;

    for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
        if ((ordering = late_string(n, N_ordering, NULL))) {
            if (streq(ordering, "out"))
                do_ordering_node(g, n, true);
            else if (streq(ordering, "in"))
                do_ordering_node(g, n, false);
            else if (ordering[0])
                agerrorf("ordering '%s' not recognized for node '%s'.\n", ordering, agnameof(n));
        }
    }
}

/* ordered_edges:
 * handle case where graph specifies edge ordering
 * If the graph does not have an ordering attribute, we then
 * check for nodes having the attribute.
 * Note that, in this implementation, the value of G_ordering
 * dominates the value of N_ordering.
 */
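/* For example (illustrative input): with
 *
 *     digraph G { ordering=out; a -> c; a -> b; a -> d; }
 *
 * the heads of a's out-edges keep their creation order c, b, d within their
 * rank; do_ordering_node() enforces this by sorting the edges by internal id
 * and chaining the heads with FLATORDER virtual edges c -> b -> d, which
 * left2right() treats as hard left-of constraints.
 */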
static void ordered_edges(graph_t * g)
{
    char *ordering;

    if (!G_ordering && !N_ordering)
        return;
    if ((ordering = late_string(g, G_ordering, NULL))) {
        if (streq(ordering, "out"))
            do_ordering(g, true);
        else if (streq(ordering, "in"))
            do_ordering(g, false);
        else if (ordering[0])
            agerrorf("ordering '%s' not recognized.\n", ordering);
    }
    else
    {
        graph_t *subg;

        for (subg = agfstsubg(g); subg; subg = agnxtsubg(subg)) {
            /* clusters are processed by separate calls to ordered_edges */
            if (!is_cluster(subg))
                ordered_edges(subg);
        }
        if (N_ordering) do_ordering_for_nodes (g);
    }
}

static int mincross_clust(graph_t *g, ints_t *scratch) {
    int c, nc;

    expand_cluster(g);
    ordered_edges(g);
    flat_breakcycles(g);
    flat_reorder(g);
    nc = mincross(g, 2, scratch);

    for (c = 1; c <= GD_n_cluster(g); c++)
        nc += mincross_clust(GD_clust(g)[c], scratch);

    save_vlist(g);
    return nc;
}

static bool left2right(graph_t *g, node_t *v, node_t *w) {
    adjmatrix_t *M;

    /* CLUSTER indicates orig nodes of clusters, and vnodes of skeletons */
    if (!ReMincross) {
        if (ND_clust(v) != ND_clust(w) && ND_clust(v) && ND_clust(w)) {
            /* the following allows cluster skeletons to be swapped */
            if (ND_ranktype(v) == CLUSTER && ND_node_type(v) == VIRTUAL)
                return false;
            if (ND_ranktype(w) == CLUSTER && ND_node_type(w) == VIRTUAL)
                return false;
            return true;
        }
    } else {
        if (ND_clust(v) != ND_clust(w))
            return true;
    }
    M = GD_rank(g)[ND_rank(v)].flat;
    if (M == NULL)
        return false;
    if (GD_flip(g)) {
        node_t *t = v;
        v = w;
        w = t;
    }
    return ELT(M, flatindex(v), flatindex(w)) != 0;
}

static int in_cross(node_t * v, node_t * w)
{
    edge_t **e1, **e2;
    int inv, cross = 0, t;

    for (e2 = ND_in(w).list; *e2; e2++) {
        int cnt = ED_xpenalty(*e2);

        inv = ND_order(agtail(*e2));
        for (e1 = ND_in(v).list; *e1; e1++) {
            t = ND_order(agtail(*e1)) - inv;
            if (t > 0 || (t == 0 && ED_tail_port(*e1).p.x > ED_tail_port(*e2).p.x))
                cross += ED_xpenalty(*e1) * cnt;
        }
    }
    return cross;
}

static int out_cross(node_t * v, node_t * w)
{
    edge_t **e1, **e2;
    int inv, cross = 0, t;

    for (e2 = ND_out(w).list; *e2; e2++) {
        int cnt = ED_xpenalty(*e2);

        inv = ND_order(aghead(*e2));
        for (e1 = ND_out(v).list; *e1; e1++) {
            t = ND_order(aghead(*e1)) - inv;
            if (t > 0 || (t == 0 && (ED_head_port(*e1)).p.x > (ED_head_port(*e2)).p.x))
                cross += ED_xpenalty(*e1) * cnt;
        }
    }
    return cross;
}

static void exchange(node_t * v, node_t * w)
{
    int vi, wi, r;

    r = ND_rank(v);
    vi = ND_order(v);
    wi = ND_order(w);
    ND_order(v) = wi;
    GD_rank(Root)[r].v[wi] = v;
    ND_order(w) = vi;
    GD_rank(Root)[r].v[vi] = w;
}

static int transpose_step(graph_t * g, int r, bool reverse)
{
    int i, c0, c1, rv;
    node_t *v, *w;

    rv = 0;
    GD_rank(g)[r].candidate = false;
    for (i = 0; i < GD_rank(g)[r].n - 1; i++) {
        v = GD_rank(g)[r].v[i];
        w = GD_rank(g)[r].v[i + 1];
        assert(ND_order(v) < ND_order(w));
        if (left2right(g, v, w))
            continue;
        c0 = c1 = 0;
        if (r > 0) {
            c0 += in_cross(v, w);
            c1 += in_cross(w, v);
        }
        if (GD_rank(g)[r + 1].n > 0) {
            c0 += out_cross(v, w);
            c1 += out_cross(w, v);
        }
        if (c1 < c0 || (c0 > 0 && reverse && c1 == c0)) {
            exchange(v, w);
            rv += c0 - c1;
            GD_rank(Root)[r].valid = false;
            GD_rank(g)[r].candidate = true;

            if (r > GD_minrank(g)) {
                GD_rank(Root)[r - 1].valid = false;
                GD_rank(g)[r - 1].candidate = true;
            }
            if (r < GD_maxrank(g)) {
                GD_rank(Root)[r + 1].valid = false;
                GD_rank(g)[r + 1].candidate = true;
            }
        }
    }
    return rv;
}

static void transpose(graph_t * g, bool reverse)
{
    int r, delta;

    for (r = GD_minrank(g); r <= GD_maxrank(g); r++)
        GD_rank(g)[r].candidate = true;
    do {
        delta = 0;
        for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
            if (GD_rank(g)[r].candidate) {
                delta += transpose_step(g, r, reverse);
            }
        }
    } while (delta >= 1);
}

static int mincross(graph_t *g, int startpass, ints_t *scratch) {
    const int endpass = 2;
    int maxthispass = 0, iter, trying, pass;
    int cur_cross, best_cross;

    if (startpass > 1) {
        cur_cross = best_cross = ncross(scratch);
        save_best(g);
    } else
        cur_cross = best_cross = INT_MAX;
    for (pass = startpass; pass <= endpass; pass++) {
        if (pass <= 1) {
            maxthispass = MIN(4, MaxIter);
            if (g == dot_root(g))
                build_ranks(g, pass, scratch);
            if (pass == 0)
                flat_breakcycles(g);
            flat_reorder(g);

            if ((cur_cross = ncross(scratch)) <= best_cross) {
                save_best(g);
                best_cross = cur_cross;
            }
        } else {
            maxthispass = MaxIter;
            if (cur_cross > best_cross)
                restore_best(g);
            cur_cross = best_cross;
        }
        trying = 0;
        for (iter = 0; iter < maxthispass; iter++) {
            if (Verbose)
                fprintf(stderr,
                        "mincross: pass %d iter %d trying %d cur_cross %d best_cross %d\n",
                        pass, iter, trying, cur_cross, best_cross);
            if (trying++ >= MinQuit)
                break;
            if (cur_cross == 0)
                break;
            mincross_step(g, iter);
            if ((cur_cross = ncross(scratch)) <= best_cross) {
                save_best(g);
                if (cur_cross < Convergence * best_cross)
                    trying = 0;
                best_cross = cur_cross;
            }
        }
        if (cur_cross == 0)
            break;
    }
    if (cur_cross > best_cross)
        restore_best(g);
    if (best_cross > 0) {
        transpose(g, false);
        best_cross = ncross(scratch);
    }
    return best_cross;
}

static void restore_best(graph_t * g)
{
    node_t *n;
    int i, r;

    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        for (i = 0; i < GD_rank(g)[r].n; i++) {
            n = GD_rank(g)[r].v[i];
            ND_order(n) = saveorder(n);
        }
    }
    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        GD_rank(Root)[r].valid = false;
        qsort(GD_rank(g)[r].v, GD_rank(g)[r].n, sizeof(GD_rank(g)[0].v[0]),
              nodeposcmpf);
    }
}

static void save_best(graph_t * g)
{
    node_t *n;
    int i, r;

    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        for (i = 0; i < GD_rank(g)[r].n; i++) {
            n = GD_rank(g)[r].v[i];
            saveorder(n) = ND_order(n);
        }
    }
}

/* merges the connected components of g */
static void merge_components(graph_t * g)
{
    node_t *u, *v;

    if (GD_comp(g).size <= 1)
        return;
    u = NULL;
    for (size_t c = 0; c < GD_comp(g).size; c++) {
        v = GD_comp(g).list[c];
        if (u)
            ND_next(u) = v;
        ND_prev(v) = u;
        while (ND_next(v)) {
            v = ND_next(v);
        }
        u = v;
    }
    GD_comp(g).size = 1;
    GD_nlist(g) = GD_comp(g).list[0];
    GD_minrank(g) = GlobalMinRank;
    GD_maxrank(g) = GlobalMaxRank;
}

/* merge connected components, create globally consistent rank lists */
static void merge2(graph_t * g)
{
    int i, r;
    node_t *v;

    /* merge the components and rank limits */
    merge_components(g);

    /* install complete ranks */
    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        GD_rank(g)[r].n = GD_rank(g)[r].an;
        GD_rank(g)[r].v = GD_rank(g)[r].av;
        for (i = 0; i < GD_rank(g)[r].n; i++) {
            v = GD_rank(g)[r].v[i];
            if (v == NULL) {
                if (Verbose)
                    fprintf(stderr,
                            "merge2: graph %s, rank %d has only %d < %d nodes\n",
                            agnameof(g), r, i, GD_rank(g)[r].n);
                GD_rank(g)[r].n = i;
                break;
            }
            ND_order(v) = i;
        }
    }
}

static void cleanup2(graph_t * g, int nc)
{
    int i, j, r, c;
    node_t *v;
    edge_t *e;

    if (TI_list) {
        free(TI_list);
        TI_list = NULL;
    }
    if (TE_list) {
        free(TE_list);
        TE_list = NULL;
    }
    /* fix vlists of clusters */
    for (c = 1; c <= GD_n_cluster(g); c++)
        rec_reset_vlists(GD_clust(g)[c]);

    /* remove node temporary edges for ordering nodes */
    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        for (i = 0; i < GD_rank(g)[r].n; i++) {
            v = GD_rank(g)[r].v[i];
            ND_order(v) = i;
            if (ND_flat_out(v).list) {
                for (j = 0; (e = ND_flat_out(v).list[j]); j++)
                    if (ED_edge_type(e) == FLATORDER) {
                        delete_flat_edge(e);
                        free(e->base.data);
                        free(e);
                        j--;
                    }
            }
        }
        free_matrix(GD_rank(g)[r].flat);
    }
    if (Verbose)
        fprintf(stderr, "mincross %s: %d crossings, %.2f secs.\n",
                agnameof(g), nc, elapsed_sec());
}

static node_t *neighbor(node_t * v, int dir)
{
    node_t *rv;

    rv = NULL;
    assert(v);
    if (dir < 0) {
        if (ND_order(v) > 0)
            rv = GD_rank(Root)[ND_rank(v)].v[ND_order(v) - 1];
    } else
        rv = GD_rank(Root)[ND_rank(v)].v[ND_order(v) + 1];
    assert((rv == 0) || (ND_order(rv) - ND_order(v)) * dir > 0);
    return rv;
}

static bool is_a_normal_node_of(graph_t *g, node_t *v) {
    return ND_node_type(v) == NORMAL && agcontains(g, v);
}

static bool is_a_vnode_of_an_edge_of(graph_t *g, node_t *v) {
    if (ND_node_type(v) == VIRTUAL
        && ND_in(v).size == 1 && ND_out(v).size == 1) {
        edge_t *e = ND_out(v).list[0];
        while (ED_edge_type(e) != NORMAL)
            e = ED_to_orig(e);
        if (agcontains(g, e))
            return true;
    }
    return false;
}

static bool inside_cluster(graph_t *g, node_t *v) {
    return is_a_normal_node_of(g, v) || is_a_vnode_of_an_edge_of(g, v);
}

static node_t *furthestnode(graph_t * g, node_t * v, int dir)
{
    node_t *u, *rv;

    rv = u = v;
    while ((u = neighbor(u, dir))) {
        if (is_a_normal_node_of(g, u))
            rv = u;
        else if (is_a_vnode_of_an_edge_of(g, u))
            rv = u;
    }
    return rv;
}

void save_vlist(graph_t * g)
{
    int r;

    if (GD_rankleader(g))
        for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
            GD_rankleader(g)[r] = GD_rank(g)[r].v[0];
        }
}

void rec_save_vlists(graph_t * g)
{
    int c;

    save_vlist(g);
    for (c = 1; c <= GD_n_cluster(g); c++)
        rec_save_vlists(GD_clust(g)[c]);
}

void rec_reset_vlists(graph_t * g)
{
    int r, c;
    node_t *u, *v, *w;

    /* fix vlists of sub-clusters */
    for (c = 1; c <= GD_n_cluster(g); c++)
        rec_reset_vlists(GD_clust(g)[c]);

    if (GD_rankleader(g))
        for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
            v = GD_rankleader(g)[r];
#ifdef DEBUG
            node_in_root_vlist(v);
#endif
            u = furthestnode(g, v, -1);
            w = furthestnode(g, v, 1);
            GD_rankleader(g)[r] = u;
#ifdef DEBUG
            assert(GD_rank(dot_root(g))[r].v[ND_order(u)] == u);
#endif
            GD_rank(g)[r].v = GD_rank(dot_root(g))[r].v + ND_order(u);
            GD_rank(g)[r].n = ND_order(w) - ND_order(u) + 1;
        }
}
/* realFillRanks:
 * The structures in crossing minimization and positioning require
 * that clusters have some node on each rank. This function recursively
 * guarantees this property. It takes into account nodes and edges in
 * a cluster, the latter causing dummy nodes for intervening ranks.
 * For any rank without a node, we create a real node of small size. This
 * is stored in the subgraph sg, for easy removal later.
 *
 * I believe it is not necessary to do this for the root graph, as these
 * are laid out one component at a time and these will necessarily have a
 * node on each rank from source to sink levels.
 */
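/* For instance (illustrative input): in
 *
 *     digraph G {
 *         subgraph cluster0 { a; b; }
 *         a -> x; x -> b;
 *     }
 *
 * cluster0 spans the ranks of a and b but owns no node on the intermediate
 * rank occupied by x, so a small filler node is created there.
 */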
static Agraph_t*
realFillRanks (Agraph_t* g, int rnks[], int rnks_sz, Agraph_t* sg)
{
    int i, c;
    Agedge_t* e;
    Agnode_t* n;

    for (c = 1; c <= GD_n_cluster(g); c++)
        sg = realFillRanks (GD_clust(g)[c], rnks, rnks_sz, sg);

    if (dot_root(g) == g)
        return sg;
    memset (rnks, 0, sizeof(int) * rnks_sz);
    for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
        rnks[ND_rank(n)] = 1;
        for (e = agfstout(g, n); e; e = agnxtout(g, e)) {
            for (i = ND_rank(n) + 1; i <= ND_rank(aghead(e)); i++)
                rnks[i] = 1;
        }
    }
    for (i = GD_minrank(g); i <= GD_maxrank(g); i++) {
        if (rnks[i] == 0) {
            if (!sg) {
                sg = agsubg (dot_root(g), "_new_rank", 1);
            }
            n = agnode (sg, NULL, 1);
            agbindrec(n, "Agnodeinfo_t", sizeof(Agnodeinfo_t), true);
            ND_rank(n) = i;
            ND_lw(n) = ND_rw(n) = 0.5;
            ND_ht(n) = 1;
            ND_UF_size(n) = 1;
            alloc_elist(4, ND_in(n));
            alloc_elist(4, ND_out(n));
            agsubnode (g, n, 1);
        }
    }
    return sg;
}

static void
fillRanks (Agraph_t* g)
{
    int rnks_sz = GD_maxrank(g) + 2;
    int *rnks = gv_calloc(rnks_sz, sizeof(int));
    realFillRanks (g, rnks, rnks_sz, NULL);
    free (rnks);
}

static void init_mincross(graph_t * g)
{
    int size;

    if (Verbose)
        start_timer();

    ReMincross = false;
    Root = g;
    /* alloc +1 for the null terminator usage in do_ordering() */
    size = agnedges(dot_root(g)) + 1;
    TE_list = gv_calloc(size, sizeof(edge_t*));
    TI_list = gv_calloc(size, sizeof(int));
    mincross_options(g);
    if (GD_flags(g) & NEW_RANK)
        fillRanks (g);
    class2(g);
    decompose(g, 1);
    allocate_ranks(g);
    ordered_edges(g);
    GlobalMinRank = GD_minrank(g);
    GlobalMaxRank = GD_maxrank(g);
}

static void flat_rev(Agraph_t * g, Agedge_t * e)
{
    int j;
    Agedge_t *rev;

    if (!ND_flat_out(aghead(e)).list)
        rev = NULL;
    else
        for (j = 0; (rev = ND_flat_out(aghead(e)).list[j]); j++)
            if (aghead(rev) == agtail(e))
                break;
    if (rev) {
        merge_oneway(e, rev);
        if (ED_edge_type(rev) == FLATORDER && ED_to_orig(rev) == 0)
            ED_to_orig(rev) = e;
        elist_append(e, ND_other(agtail(e)));
    } else {
        rev = new_virtual_edge(aghead(e), agtail(e), e);
        if (ED_edge_type(e) == FLATORDER)
            ED_edge_type(rev) = FLATORDER;
        else
            ED_edge_type(rev) = REVERSED;
        ED_label(rev) = ED_label(e);
        flat_edge(g, rev);
    }
}

static void flat_search(graph_t * g, node_t * v)
{
    int i;
    bool hascl;
    edge_t *e;
    adjmatrix_t *M = GD_rank(g)[ND_rank(v)].flat;

    ND_mark(v) = true;
    ND_onstack(v) = true;
    hascl = GD_n_cluster(dot_root(g)) > 0;
    if (ND_flat_out(v).list)
        for (i = 0; (e = ND_flat_out(v).list[i]); i++) {
            if (hascl && !(agcontains(g, agtail(e)) && agcontains(g, aghead(e))))
                continue;
            if (ED_weight(e) == 0)
                continue;
            if (ND_onstack(aghead(e))) {
                assert(flatindex(aghead(e)) < M->nrows);
                assert(flatindex(agtail(e)) < M->ncols);
                ELT(M, flatindex(aghead(e)), flatindex(agtail(e))) = 1;
                delete_flat_edge(e);
                i--;
                if (ED_edge_type(e) == FLATORDER)
                    continue;
                flat_rev(g, e);
            } else {
                assert(flatindex(aghead(e)) < M->nrows);
                assert(flatindex(agtail(e)) < M->ncols);
                ELT(M, flatindex(agtail(e)), flatindex(aghead(e))) = 1;
                if (!ND_mark(aghead(e)))
                    flat_search(g, aghead(e));
            }
        }
    ND_onstack(v) = false;
}

static void flat_breakcycles(graph_t * g)
{
    int i, r, flat;
    node_t *v;

    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        flat = 0;
        for (i = 0; i < GD_rank(g)[r].n; i++) {
            v = GD_rank(g)[r].v[i];
            ND_mark(v) = false;
            ND_onstack(v) = false;
            ND_low(v) = i;
            if (ND_flat_out(v).size > 0 && flat == 0) {
                GD_rank(g)[r].flat =
                    new_matrix((size_t)GD_rank(g)[r].n, (size_t)GD_rank(g)[r].n);
                flat = 1;
            }
        }
        if (flat) {
            for (i = 0; i < GD_rank(g)[r].n; i++) {
                v = GD_rank(g)[r].v[i];
                if (!ND_mark(v))
                    flat_search(g, v);
            }
        }
    }
}

/* allocate_ranks:
 * Allocate rank structure, determining number of nodes per rank.
 * Note that no nodes are put into the structure yet.
 */
void allocate_ranks(graph_t * g)
{
    int r, low, high;
    node_t *n;
    edge_t *e;
    int *cn = gv_calloc(GD_maxrank(g) + 2, sizeof(int)); // must be 0 based, not GD_minrank

    for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
        cn[ND_rank(n)]++;
        for (e = agfstout(g, n); e; e = agnxtout(g, e)) {
            low = ND_rank(agtail(e));
            high = ND_rank(aghead(e));
            if (low > high) {
                int t = low;
                low = high;
                high = t;
            }
            for (r = low + 1; r < high; r++)
                cn[r]++;
        }
    }
    GD_rank(g) = gv_calloc(GD_maxrank(g) + 2, sizeof(rank_t));
    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        GD_rank(g)[r].an = GD_rank(g)[r].n = cn[r] + 1;
        GD_rank(g)[r].av = GD_rank(g)[r].v = gv_calloc(cn[r] + 1, sizeof(node_t*));
    }
    free(cn);
}

/* install a node at the current right end of its rank */
void install_in_rank(graph_t * g, node_t * n)
{
    int i, r;

    r = ND_rank(n);
    i = GD_rank(g)[r].n;
    if (GD_rank(g)[r].an <= 0) {
        agerrorf("install_in_rank, line %d: %s %s rank %d i = %d an = 0\n",
                 __LINE__, agnameof(g), agnameof(n), r, i);
        return;
    }
    GD_rank(g)[r].v[i] = n;
    ND_order(n) = i;
    GD_rank(g)[r].n++;
    assert(GD_rank(g)[r].n <= GD_rank(g)[r].an);
#ifdef DEBUG
    {
        node_t *v;

        for (v = GD_nlist(g); v; v = ND_next(v))
            if (v == n)
                break;
        assert(v != NULL);
    }
#endif
    if (ND_order(n) > GD_rank(Root)[r].an) {
        agerrorf("install_in_rank, line %d: ND_order(%s) [%d] > GD_rank(Root)[%d].an [%d]\n",
                 __LINE__, agnameof(n), ND_order(n), r, GD_rank(Root)[r].an);
        return;
    }
    if (r < GD_minrank(g) || r > GD_maxrank(g)) {
        agerrorf("install_in_rank, line %d: rank %d not in rank range [%d,%d]\n",
                 __LINE__, r, GD_minrank(g), GD_maxrank(g));
        return;
    }
    if (GD_rank(g)[r].v + ND_order(n) >
        GD_rank(g)[r].av + GD_rank(Root)[r].an) {
        agerrorf("install_in_rank, line %d: GD_rank(g)[%d].v + ND_order(%s) [%d] > GD_rank(g)[%d].av + GD_rank(Root)[%d].an [%d]\n",
                 __LINE__, r, agnameof(n), ND_order(n), r, r, GD_rank(Root)[r].an);
        return;
    }
}
/* install nodes in ranks. the initial ordering ensures that series-parallel
 * graphs such as trees are drawn with no crossings. it tries searching
 * in- and out-edges and takes the better of the two initial orderings.
 */
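/* Small example (illustrative): for the tree
 *
 *     digraph G { a -> b; a -> c; b -> d; b -> e; }
 *
 * pass 0 starts a breadth-first search at the source a, filling rank 1 in
 * the order b, c and rank 2 in the order d, e. Sibling subtrees land in
 * contiguous blocks, which is why trees need no further uncrossing.
 */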
void build_ranks(graph_t *g, int pass, ints_t *scratch) {
    int i, j;
    node_t *n, *ns;
    edge_t **otheredges;
    node_queue_t q = {0};

    for (n = GD_nlist(g); n; n = ND_next(n))
        MARK(n) = false;

#ifdef DEBUG
    {
        edge_t *e;

        for (n = GD_nlist(g); n; n = ND_next(n)) {
            for (i = 0; (e = ND_out(n).list[i]); i++)
                assert(!MARK(aghead(e)));
            for (i = 0; (e = ND_in(n).list[i]); i++)
                assert(!MARK(agtail(e)));
        }
    }
#endif

    for (i = GD_minrank(g); i <= GD_maxrank(g); i++)
        GD_rank(g)[i].n = 0;

    const bool walkbackwards = g != agroot(g); // if this is a cluster, need to
                                               // walk GD_nlist backward to
                                               // preserve input node order
    if (walkbackwards) {
        for (ns = GD_nlist(g); ND_next(ns); ns = ND_next(ns)) {
            ;
        }
    } else {
        ns = GD_nlist(g);
    }
    for (n = ns; n; n = walkbackwards ? ND_prev(n) : ND_next(n)) {
        otheredges = pass == 0 ? ND_in(n).list : ND_out(n).list;
        if (otheredges[0] != NULL)
            continue;
        if (!MARK(n)) {
            MARK(n) = true;
            node_queue_push_back(&q, n);
            while (!node_queue_is_empty(&q)) {
                node_t *n0 = node_queue_pop_front(&q);
                if (ND_ranktype(n0) != CLUSTER) {
                    install_in_rank(g, n0);
                    enqueue_neighbors(&q, n0, pass);
                } else {
                    install_cluster(g, n0, pass, &q);
                }
            }
        }
    }
    assert(node_queue_is_empty(&q));
    for (i = GD_minrank(g); i <= GD_maxrank(g); i++) {
        GD_rank(Root)[i].valid = false;
        if (GD_flip(g) && GD_rank(g)[i].n > 0) {
            node_t **vlist = GD_rank(g)[i].v;
            int num_nodes_1 = GD_rank(g)[i].n - 1;
            int half_num_nodes_1 = num_nodes_1 / 2;
            for (j = 0; j <= half_num_nodes_1; j++)
                exchange(vlist[j], vlist[num_nodes_1 - j]);
        }
    }

    if (g == dot_root(g) && ncross(scratch) > 0)
        transpose(g, false);
    node_queue_free(&q);
}

void enqueue_neighbors(node_queue_t *q, node_t *n0, int pass) {
    edge_t *e;

    if (pass == 0) {
        for (size_t i = 0; i < ND_out(n0).size; i++) {
            e = ND_out(n0).list[i];
            if (!MARK(aghead(e))) {
                MARK(aghead(e)) = true;
                node_queue_push_back(q, aghead(e));
            }
        }
    } else {
        for (size_t i = 0; i < ND_in(n0).size; i++) {
            e = ND_in(n0).list[i];
            if (!MARK(agtail(e))) {
                MARK(agtail(e)) = true;
                node_queue_push_back(q, agtail(e));
            }
        }
    }
}

static bool constraining_flat_edge(Agraph_t *g, Agedge_t *e) {
    if (ED_weight(e) == 0)
        return false;
    if (!inside_cluster(g, agtail(e)))
        return false;
    if (!inside_cluster(g, aghead(e)))
        return false;
    return true;
}

DEFINE_LIST(nodes, node_t *)
/* construct the list of nodes reachable from v in post-order.
 * This is the same as doing a topological sort in reverse order.
 */
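/* E.g. (illustrative): with constraining flat edges u -> v and v -> w on one
 * rank, postorder(g, u, ...) appends w, then v, then u; flat_reorder() later
 * reverses the list, recovering the topological order u, v, w.
 */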
static void postorder(graph_t *g, node_t *v, nodes_t *list, int r) {
    edge_t *e;
    int i;

    MARK(v) = true;
    if (ND_flat_out(v).size > 0) {
        for (i = 0; (e = ND_flat_out(v).list[i]); i++) {
            if (!constraining_flat_edge(g, e)) continue;
            if (!MARK(aghead(e)))
                postorder(g, aghead(e), list, r);
        }
    }
    assert(ND_rank(v) == r);
    nodes_append(list, v);
}

static void flat_reorder(graph_t * g)
{
    int i, r, local_in_cnt, local_out_cnt, base_order;
    node_t *v;
    nodes_t temprank = {0};
    edge_t *flat_e, *e;

    if (!GD_has_flat_edges(g))
        return;
    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        if (GD_rank(g)[r].n == 0) continue;
        base_order = ND_order(GD_rank(g)[r].v[0]);
        for (i = 0; i < GD_rank(g)[r].n; i++)
            MARK(GD_rank(g)[r].v[i]) = false;
        nodes_clear(&temprank);

        /* construct reverse topological sort order in temprank */
        for (i = 0; i < GD_rank(g)[r].n; i++) {
            if (GD_flip(g)) v = GD_rank(g)[r].v[i];
            else v = GD_rank(g)[r].v[GD_rank(g)[r].n - i - 1];

            local_in_cnt = local_out_cnt = 0;
            for (size_t j = 0; j < ND_flat_in(v).size; j++) {
                flat_e = ND_flat_in(v).list[j];
                if (constraining_flat_edge(g, flat_e)) local_in_cnt++;
            }
            for (size_t j = 0; j < ND_flat_out(v).size; j++) {
                flat_e = ND_flat_out(v).list[j];
                if (constraining_flat_edge(g, flat_e)) local_out_cnt++;
            }
            if ((local_in_cnt == 0) && (local_out_cnt == 0))
                nodes_append(&temprank, v);
            else {
                if (!MARK(v) && local_in_cnt == 0) {
                    postorder(g, v, &temprank, r);
                }
            }
        }

        if (nodes_size(&temprank) > 0) {
            if (!GD_flip(g)) {
                nodes_reverse(&temprank);
            }
            for (i = 0; i < GD_rank(g)[r].n; i++) {
                v = GD_rank(g)[r].v[i] = nodes_get(&temprank, (size_t)i);
                ND_order(v) = i + base_order;
            }

            /* nonconstraint flat edges must be made LR */
            for (i = 0; i < GD_rank(g)[r].n; i++) {
                v = GD_rank(g)[r].v[i];
                if (ND_flat_out(v).list) {
                    for (size_t j = 0; (e = ND_flat_out(v).list[j]); j++) {
                        if ((!GD_flip(g) && ND_order(aghead(e)) < ND_order(agtail(e))) ||
                            (GD_flip(g) && ND_order(aghead(e)) > ND_order(agtail(e)))) {
                            assert(!constraining_flat_edge(g, e));
                            delete_flat_edge(e);
                            j--;
                            flat_rev(g, e);
                        }
                    }
                }
            }
            /* postprocess to restore intended order */
        }
        /* else do no harm! */
        GD_rank(Root)[r].valid = false;
    }
    nodes_free(&temprank);
}

static void reorder(graph_t * g, int r, bool reverse, bool hasfixed)
{
    int changed = 0, nelt;
    node_t **vlist = GD_rank(g)[r].v;
    node_t **lp, **rp, **ep = vlist + GD_rank(g)[r].n;

    for (nelt = GD_rank(g)[r].n - 1; nelt >= 0; nelt--) {
        lp = vlist;
        while (lp < ep) {
            /* find leftmost node that can be compared */
            while (lp < ep && ND_mval(*lp) < 0)
                lp++;
            if (lp >= ep)
                break;
            /* find the next node to its right that it can be compared with */
            bool sawclust = false;
            bool muststay = false;
            for (rp = lp + 1; rp < ep; rp++) {
                if (sawclust && ND_clust(*rp))
                    continue;   /* ### */
                if (left2right(g, *lp, *rp)) {
                    muststay = true;
                    break;
                }
                if (ND_mval(*rp) >= 0)
                    break;
                if (ND_clust(*rp))
                    sawclust = true;    /* ### */
            }
            if (rp >= ep)
                break;
            if (!muststay) {
                const double p1 = ND_mval(*lp);
                const double p2 = ND_mval(*rp);
                if (p1 > p2 || (p1 >= p2 && reverse)) {
                    exchange(*lp, *rp);
                    changed++;
                }
            }
            lp = rp;
        }
        if (!hasfixed && !reverse)
            ep--;
    }
    if (changed) {
        GD_rank(Root)[r].valid = false;
        if (r > 0)
            GD_rank(Root)[r - 1].valid = false;
    }
}

static void mincross_step(graph_t * g, int pass)
{
    int r, other, first, last, dir;

    bool reverse = pass % 4 < 2;

    if (pass % 2 == 0) {        /* down pass */
        first = GD_minrank(g) + 1;
        if (GD_minrank(g) > GD_minrank(Root))
            first--;
        last = GD_maxrank(g);
        dir = 1;
    } else {                    /* up pass */
        first = GD_maxrank(g) - 1;
        last = GD_minrank(g);
        if (GD_maxrank(g) < GD_maxrank(Root))
            first++;
        dir = -1;
    }
    for (r = first; r != last + dir; r += dir) {
        other = r - dir;
        bool hasfixed = medians(g, r, other);
        reorder(g, r, reverse, hasfixed);
    }
    transpose(g, !reverse);
}

static int local_cross(elist l, int dir)
{
    int i, j;
    int cross = 0;
    edge_t *e, *f;
    bool is_out = dir > 0;

    for (i = 0; (e = l.list[i]); i++) {
        if (is_out)
            for (j = i + 1; (f = l.list[j]); j++) {
                if ((ND_order(aghead(f)) - ND_order(aghead(e)))
                    * (ED_tail_port(f).p.x - ED_tail_port(e).p.x) < 0)
                    cross += ED_xpenalty(e) * ED_xpenalty(f);
            }
        else
            for (j = i + 1; (f = l.list[j]); j++) {
                if ((ND_order(agtail(f)) - ND_order(agtail(e)))
                    * (ED_head_port(f).p.x - ED_head_port(e).p.x) < 0)
                    cross += ED_xpenalty(e) * ED_xpenalty(f);
            }
    }
    return cross;
}

static int rcross(graph_t *g, int r, ints_t *Count) {
    int top, bot, cross, max, i, k;
    node_t **rtop, *v;

    cross = 0;
    max = 0;
    rtop = GD_rank(g)[r].v;

    // discard any data from previous runs
    ints_clear(Count);

    for (top = 0; top < GD_rank(g)[r].n; top++) {
        edge_t *e;
        if (max > 0) {
            for (i = 0; (e = ND_out(rtop[top]).list[i]); i++) {
                for (k = ND_order(aghead(e)) + 1; k <= max; k++)
                    cross += ints_size(Count) <= (size_t)k
                               ? 0
                               : ints_get(Count, (size_t)k) * ED_xpenalty(e);
            }
        }
        for (i = 0; (e = ND_out(rtop[top]).list[i]); i++) {
            int inv = ND_order(aghead(e));
            if (inv > max)
                max = inv;
            const size_t inv_z = (size_t)inv;
            if (ints_size(Count) <= inv_z) {
                ints_resize(Count, inv_z + 1, 0);
            }
            ints_set(Count, inv_z, ints_get(Count, inv_z) + ED_xpenalty(e));
        }
    }
    for (top = 0; top < GD_rank(g)[r].n; top++) {
        v = GD_rank(g)[r].v[top];
        if (ND_has_port(v))
            cross += local_cross(ND_out(v), 1);
    }
    for (bot = 0; bot < GD_rank(g)[r + 1].n; bot++) {
        v = GD_rank(g)[r + 1].v[bot];
        if (ND_has_port(v))
            cross += local_cross(ND_in(v), -1);
    }
    return cross;
}
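/* Worked example (illustrative): suppose rank r holds a, b in that order,
 * with unit-penalty edges a -> pos 2, b -> pos 0 and b -> pos 1 on rank r+1.
 * Processing a first records Count[2] = 1 (max = 2). Processing b, the edge
 * to pos 0 scans Count[1..2] and finds 1 earlier edge ending further right,
 * and the edge to pos 1 scans Count[2] and finds 1 more. rcross() returns 2,
 * matching the two pairwise crossings (a->2, b->0) and (a->2, b->1).
 */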
static int ncross(ints_t *scratch) {
    assert(scratch != NULL);

    int r, count, nc;

    graph_t *g = Root;
    count = 0;
    for (r = GD_minrank(g); r < GD_maxrank(g); r++) {
        if (GD_rank(g)[r].valid)
            count += GD_rank(g)[r].cache_nc;
        else {
            nc = GD_rank(g)[r].cache_nc = rcross(g, r, scratch);
            count += nc;
            GD_rank(g)[r].valid = true;
        }
    }
    return count;
}

static int ordercmpf(const void *x, const void *y) {
    const int *i0 = x;
    const int *i1 = y;
    if (*i0 < *i1) {
        return -1;
    }
    if (*i0 > *i1) {
        return 1;
    }
    return 0;
}
/* flat_mval:
 * Calculate a mval for nodes with no in or out non-flat edges.
 * Assume (ND_out(n).size == 0) && (ND_in(n).size == 0)
 * Find flat edge a->n where a has the largest order and set
 * n.mval = a.mval+1, assuming a.mval is defined (>=0).
 * If there are no flat in edges, find flat edge n->a where a
 * has the smallest order and set n.mval = a.mval-1, assuming
 * a.mval is > 0.
 * Return true if n.mval is left -1, indicating a fixed node for sorting.
 */
static bool flat_mval(node_t * n)
{
    int i;
    edge_t *e, **fl;
    node_t *nn;

    if (ND_flat_in(n).size > 0) {
        fl = ND_flat_in(n).list;
        nn = agtail(fl[0]);
        for (i = 1; (e = fl[i]); i++)
            if (ND_order(agtail(e)) > ND_order(nn))
                nn = agtail(e);
        if (ND_mval(nn) >= 0) {
            ND_mval(n) = ND_mval(nn) + 1;
            return false;
        }
    } else if (ND_flat_out(n).size > 0) {
        fl = ND_flat_out(n).list;
        nn = aghead(fl[0]);
        for (i = 1; (e = fl[i]); i++)
            if (ND_order(aghead(e)) < ND_order(nn))
                nn = aghead(e);
        if (ND_mval(nn) > 0) {
            ND_mval(n) = ND_mval(nn) - 1;
            return false;
        }
    }
    return true;
}

#define VAL(node,port) (MC_SCALE * ND_order(node) + (port).order)
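/* medians() below computes the weighted median of each node's neighbor
 * positions. Worked example (illustrative): for adjacent positions
 * {0, 3, 5, 6}, j = 4 is even, so lm = 1, rm = 2,
 * lspan = list[1] - list[0] = 3 and rspan = list[3] - list[2] = 1, giving
 *
 *     mval = (list[lm]*rspan + list[rm]*lspan) / (lspan + rspan)
 *          = (3*1 + 5*3) / 4 = 4.5,
 *
 * i.e. the median is biased toward the side where neighbors pack tighter.
 */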
static bool medians(graph_t * g, int r0, int r1)
{
    int i, j0, lspan, rspan, *list;
    node_t *n, **v;
    edge_t *e;
    bool hasfixed = false;

    list = TI_list;
    v = GD_rank(g)[r0].v;
    for (i = 0; i < GD_rank(g)[r0].n; i++) {
        n = v[i];
        size_t j = 0;
        if (r1 > r0)
            for (j0 = 0; (e = ND_out(n).list[j0]); j0++) {
                if (ED_xpenalty(e) > 0)
                    list[j++] = VAL(aghead(e), ED_head_port(e));
            }
        else
            for (j0 = 0; (e = ND_in(n).list[j0]); j0++) {
                if (ED_xpenalty(e) > 0)
                    list[j++] = VAL(agtail(e), ED_tail_port(e));
            }
        switch (j) {
        case 0:
            ND_mval(n) = -1;
            break;
        case 1:
            ND_mval(n) = list[0];
            break;
        case 2:
            ND_mval(n) = (list[0] + list[1]) / 2;
            break;
        default:
            qsort(list, j, sizeof(int), ordercmpf);
            if (j % 2)
                ND_mval(n) = list[j / 2];
            else {
                /* weighted median */
                size_t rm = j / 2;
                size_t lm = rm - 1;
                rspan = list[j - 1] - list[rm];
                lspan = list[lm] - list[0];
                if (lspan == rspan)
                    ND_mval(n) = (list[lm] + list[rm]) / 2;
                else {
                    double w = list[lm] * (double)rspan + list[rm] * (double)lspan;
                    ND_mval(n) = w / (lspan + rspan);
                }
            }
        }
    }
    for (i = 0; i < GD_rank(g)[r0].n; i++) {
        n = v[i];
        if ((ND_out(n).size == 0) && (ND_in(n).size == 0))
            hasfixed |= flat_mval(n);
    }
    return hasfixed;
}

static int nodeposcmpf(const void *x, const void *y) {
// Suppress Clang/GCC -Wcast-qual warning. Casting away const here is acceptable
// as the later usage is const. We need the cast because the macros use
// non-const pointers for genericity.
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
    node_t **n0 = (node_t **)x;
    node_t **n1 = (node_t **)y;
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
    if (ND_order(*n0) < ND_order(*n1)) {
        return -1;
    }
    if (ND_order(*n0) > ND_order(*n1)) {
        return 1;
    }
    return 0;
}

static int edgeidcmpf(const void *x, const void *y) {
// Suppress Clang/GCC -Wcast-qual warning. Casting away const here is acceptable
// as the later usage is const. We need the cast because the macros use
// non-const pointers for genericity.
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
    edge_t **e0 = (edge_t **)x;
    edge_t **e1 = (edge_t **)y;
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
    if (AGSEQ(*e0) < AGSEQ(*e1)) {
        return -1;
    }
    if (AGSEQ(*e0) > AGSEQ(*e1)) {
        return 1;
    }
    return 0;
}

/* following code deals with weights of edges of "virtual" nodes */
#define ORDINARY 0
#define SINGLETON 1
#define VIRTUALNODE 2
#define NTYPES 3

#define C_EE 1
#define C_VS 2
#define C_SS 2
#define C_VV 4

static int table[NTYPES][NTYPES] = {
    /* ordinary */  {C_EE, C_EE, C_EE},
    /* singleton */ {C_EE, C_SS, C_VS},
    /* virtual */   {C_EE, C_VS, C_VV}
};
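/* Illustrative effect: an edge both of whose endpoints are virtual (i.e. an
 * interior segment of a long edge chain) has its weight scaled by C_VV = 4,
 * while an edge between two ordinary nodes keeps weight * C_EE = 1. Later
 * phases pull harder on heavier edges, so interior chains of long edges tend
 * to be drawn straight.
 */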
static int endpoint_class(node_t * n)
{
    if (ND_node_type(n) == VIRTUAL)
        return VIRTUALNODE;
    if (ND_weight_class(n) <= 1)
        return SINGLETON;
    return ORDINARY;
}

void virtual_weight(edge_t * e)
{
    int t;

    t = table[endpoint_class(agtail(e))][endpoint_class(aghead(e))];

    /* check whether the upcoming computation will overflow */
    assert(t >= 0);
    if (INT_MAX / t < ED_weight(e)) {
        agerrorf("overflow when calculating virtual weight of edge\n");
        graphviz_exit(EXIT_FAILURE);
    }

    ED_weight(e) *= t;
}

#ifdef DEBUG
void check_rs(graph_t * g, int null_ok)
{
    int i, r;
    node_t *v, *prev;

    fprintf(stderr, "\n\n%s:\n", agnameof(g));
    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        fprintf(stderr, "%d: ", r);
        prev = NULL;
        for (i = 0; i < GD_rank(g)[r].n; i++) {
            v = GD_rank(g)[r].v[i];
            if (v == NULL) {
                fprintf(stderr, "NULL\t");
                if (!null_ok)
                    abort();
            } else {
                fprintf(stderr, "%s(%f)\t", agnameof(v), ND_mval(v));
                assert(ND_rank(v) == r);
                assert(v != prev);
                prev = v;
            }
        }
        fprintf(stderr, "\n");
    }
}

void check_order(void)
{
    int i, r;
    node_t *v;
    graph_t *g = Root;

    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        assert(GD_rank(g)[r].v[GD_rank(g)[r].n] == NULL);
        for (i = 0; (v = GD_rank(g)[r].v[i]); i++) {
            assert(ND_rank(v) == r);
            assert(ND_order(v) == i);
        }
    }
}
#endif

static void mincross_options(graph_t * g)
{
    char *p;
    double f;

    /* set default values */
    MinQuit = 8;
    MaxIter = 24;

    p = agget(g, "mclimit");
    if (p && (f = atof(p)) > 0.0) {
        MinQuit = MAX(1, MinQuit * f);
        MaxIter = MAX(1, MaxIter * f);
    }
}

#ifdef DEBUG
void check_exchange(node_t * v, node_t * w)
{
    int i, r;
    node_t *u;

    if (ND_clust(v) == NULL && ND_clust(w) == NULL)
        return;
    assert(ND_clust(v) == NULL || ND_clust(w) == NULL);
    assert(ND_rank(v) == ND_rank(w));
    assert(ND_order(v) < ND_order(w));
    r = ND_rank(v);
    for (i = ND_order(v) + 1; i < ND_order(w); i++) {
        u = GD_rank(dot_root(v))[r].v[i];
        if (ND_clust(u))
            abort();
    }
}

void check_vlists(graph_t * g)
{
    int c, i, j, r;
    node_t *u;

    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        for (i = 0; i < GD_rank(g)[r].n; i++) {
            u = GD_rank(g)[r].v[i];
            j = ND_order(u);
            assert(GD_rank(Root)[r].v[j] == u);
        }
        if (GD_rankleader(g)) {
            u = GD_rankleader(g)[r];
            j = ND_order(u);
            assert(GD_rank(Root)[r].v[j] == u);
        }
    }
    for (c = 1; c <= GD_n_cluster(g); c++)
        check_vlists(GD_clust(g)[c]);
}

void node_in_root_vlist(node_t * n)
{
    node_t **vptr;

    for (vptr = GD_rank(Root)[ND_rank(n)].v; *vptr; vptr++)
        if (*vptr == n)
            break;
    if (*vptr == 0)
        abort();
}
#endif                          /* DEBUG code */