/*************************************************************************
 * Copyright (c) 2011 AT&T Intellectual Property
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * https://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors: Details at https://graphviz.org
 *************************************************************************/

/*
 * position(g): set ND_coord(n) (x and y) for all nodes n of g, using GD_rank(g).
 * (the graph may be modified by merging certain edges with a common endpoint.)
 * the coordinates are computed by constructing and ranking an auxiliary graph.
 * then leaf nodes are inserted in the fast graph. cluster boundary nodes are
 * created and correctly separated.
 */
#include <assert.h>
#include <common/geomprocs.h>
#include <cgraph/gv_math.h>
#include <dotgen/dot.h>
#include <dotgen/aspect.h>
#include <limits.h>
#include <math.h>
#include <stdbool.h>
#include <stdlib.h>
#include <util/alloc.h>

static int nsiter2(graph_t * g);
static void create_aux_edges(graph_t * g);
static void remove_aux_edges(graph_t * g);
static void set_xcoords(graph_t * g);
static void set_ycoords(graph_t * g);
static void set_aspect(graph_t *g);
static void expand_leaves(graph_t * g);
static void make_lrvn(graph_t * g);
static void contain_nodes(graph_t * g);
static bool idealsize(graph_t * g, double);
#if defined(DEBUG) && DEBUG > 1
static void
dumpNS (graph_t * g)
{
    node_t* n = GD_nlist(g);
    elist el;
    edge_t* e;

    while (n) {
        el = ND_out(n);
        for (size_t i = 0; i < el.size; i++) {
            e = el.list[i];
            fprintf (stderr, "%s(%p) -> ", agnameof(agtail(e)),
                     (void *)agtail(e));
            fprintf (stderr, "%s(%p) : %d\n", agnameof(aghead(e)),
                     (void *)aghead(e), ED_minlen(e));
        }
        n = ND_next(n);
    }
}
#endif
static double
largeMinlen (double l)
{
    agerrorf(
        "Edge length %f larger than maximum %d allowed.\nCheck for overwide "
        "node(s).\n",
        l, INT_MAX);
    return (double)INT_MAX;
}
/* When source and/or sink nodes are defined, it is possible that
 * after the auxiliary edges are added, the graph may still have 2 or
 * 3 components. To fix this, we put trivial constraints connecting the
 * first items of each rank.
 */
static void
connectGraph (graph_t* g)
{
    int i, j, r;
    bool found;
    node_t* tp;
    node_t* hp;
    node_t* sn;
    edge_t* e;
    rank_t* rp;

    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        rp = GD_rank(g)+r;
        found = false;
        tp = NULL;
        for (i = 0; i < rp->n; i++) {
            tp = rp->v[i];
            if (ND_save_out(tp).list) {
                for (j = 0; (e = ND_save_out(tp).list[j]); j++) {
                    if (ND_rank(aghead(e)) > r || ND_rank(agtail(e)) > r) {
                        found = true;
                        break;
                    }
                }
                if (found) break;
            }
            if (ND_save_in(tp).list) {
                for (j = 0; (e = ND_save_in(tp).list[j]); j++) {
                    if (ND_rank(agtail(e)) > r || ND_rank(aghead(e)) > r) {
                        found = true;
                        break;
                    }
                }
                if (found) break;
            }
        }
        if (found || !tp) continue;
        tp = rp->v[0];
        if (r < GD_maxrank(g)) hp = (rp+1)->v[0];
        else hp = (rp-1)->v[0];
        assert (hp);
        sn = virtual_node(g);
        ND_node_type(sn) = SLACKNODE;
        make_aux_edge(sn, tp, 0, 0);
        make_aux_edge(sn, hp, 0, 0);
        ND_rank(sn) = MIN(ND_rank(tp), ND_rank(hp));
    }
}
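
/* dot_position:
 * Entry point for the coordinate phase. As the calls below show, y
 * coordinates come from the ranks (set_ycoords), then an auxiliary
 * constraint graph is built (create_aux_edges) and ranked by network
 * simplex to obtain x coordinates (set_xcoords), after which aspect
 * ratio scaling is applied and the auxiliary edges are removed.
 */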
void dot_position(graph_t *g) {
    if (GD_nlist(g) == NULL)
        return;			/* ignore empty graph */
    mark_lowclusters(g);	/* we could remove from splines.c now */
    set_ycoords(g);
    if (Concentrate)
        dot_concentrate(g);
    expand_leaves(g);
    if (flat_edges(g))
        set_ycoords(g);
    create_aux_edges(g);
    if (rank(g, 2, nsiter2(g))) { /* LR balance == 2 */
        connectGraph (g);
        const int rank_result = rank(g, 2, nsiter2(g));
        assert(rank_result == 0);
        (void)rank_result;
    }
    set_xcoords(g);
    set_aspect(g);
    remove_aux_edges(g);	/* must come after set_aspect since we now
				 * use GD_ln and GD_rn for bbox width.
				 */
}
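
/* nsiter2:
 * Iteration limit for the x-coordinate network simplex passes, taken
 * from the "nslimit" graph attribute scaled by the node count;
 * effectively unlimited (INT_MAX) when the attribute is unset.
 */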
static int nsiter2(graph_t * g)
{
    int maxiter = INT_MAX;
    char *s;

    if ((s = agget(g, "nslimit")))
        maxiter = scale_clamp(agnnodes(g), atof(s));
    return maxiter;
}
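
/* go/canreach:
 * Recursive reachability test over the fast out-edge lists: is v
 * reachable from u? No visited marks are kept, so this presumably
 * relies on the auxiliary graph being acyclic and small at the call
 * sites (the flat-edge guards in make_LR_constraints).
 */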
static bool go(node_t *u, node_t *v) {
    int i;
    edge_t *e;

    if (u == v)
        return true;
    for (i = 0; (e = ND_out(u).list[i]); i++) {
        if (go(aghead(e), v))
            return true;
    }
    return false;
}

static bool canreach(node_t *u, node_t *v) {
    return go(u, v);
}
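
/* make_aux_edge:
 * Create a constraint edge u -> v with minimum length len and weight wt
 * in the auxiliary graph. The edge pair is allocated outside any cgraph
 * graph; lengths beyond INT_MAX are clamped (largeMinlen) before
 * rounding, and the edge is installed via fast_edge.
 */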
edge_t *make_aux_edge(node_t * u, node_t * v, double len, int wt)
{
    edge_t *e;

    Agedgepair_t* e2 = gv_alloc(sizeof(Agedgepair_t));
    AGTYPE(&(e2->in)) = AGINEDGE;
    AGTYPE(&(e2->out)) = AGOUTEDGE;
    e2->out.base.data = gv_alloc(sizeof(Agedgeinfo_t));
    e = &(e2->out);
    agtail(e) = u;
    aghead(e) = v;
    if (len > INT_MAX)
        len = largeMinlen (len);
    ED_minlen(e) = ROUND(len);
    ED_weight(e) = wt;
    fast_edge(e);
    return e;
}
static void allocate_aux_edges(graph_t * g)
{
    int i, j, n_in;
    node_t *n;

    /* allocate space for aux edge lists */
    for (n = GD_nlist(g); n; n = ND_next(n)) {
        ND_save_in(n) = ND_in(n);
        ND_save_out(n) = ND_out(n);
        for (i = 0; ND_out(n).list[i]; i++);
        for (j = 0; ND_in(n).list[j]; j++);
        n_in = i + j;
        alloc_elist(n_in + 3, ND_in(n));
        alloc_elist(3, ND_out(n));
    }
}
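
/* make_LR_constraints:
 * Constrain the left-to-right order within each rank: adjacent nodes u,v
 * get an aux edge u -> v of minimum length rw(u) + nodesep + lw(v), so
 * the subsequent ranking keeps their centers at least that far apart.
 * For example, two nodes with half-widths of 36pt and nodesep of 18pt
 * yield a minimum center-to-center distance of 90pt. Self-edge widths
 * and flat-edge labels widen the constraints further.
 */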
static void
make_LR_constraints(graph_t * g)
{
    int i, j;
    int m0;
    double width;
    int sep[2];
    int nodesep;	/* separation between nodes on same rank */
    edge_t *e, *e0, *e1, *ff;
    node_t *u, *v, *t0, *h0;
    rank_t *rank = GD_rank(g);

    /* Use smaller separation on odd ranks if g has edge labels */
    if (GD_has_labels(g->root) & EDGE_LABEL) {
        sep[0] = GD_nodesep(g);
        sep[1] = 5;
    }
    else {
        sep[1] = sep[0] = GD_nodesep(g);
    }

    /* make edges to constrain left-to-right ordering */
    for (i = GD_minrank(g); i <= GD_maxrank(g); i++) {
        double last;
        last = ND_rank(rank[i].v[0]) = 0;
        nodesep = sep[i & 1];
        for (j = 0; j < rank[i].n; j++) {
            u = rank[i].v[j];
            ND_mval(u) = ND_rw(u);	/* keep it somewhere safe */
            if (ND_other(u).size > 0) {	/* compute self size */
                /* FIX: dot assumes all self-edges go to the right. This
                 * is no longer true, though makeSelfEdge still attempts to
                 * put as many as reasonable on the right. The dot code
                 * should be modified to allow a box reflecting the placement
                 * of all self-edges, and use that to reposition the nodes.
                 * Note that this would not only affect left and right
                 * positioning but may also affect interrank spacing.
                 */
                double sw = 0; // self width
                for (size_t k = 0; (e = ND_other(u).list[k]); k++) {
                    if (agtail(e) == aghead(e)) {
                        sw += selfRightSpace (e);
                    }
                }
                ND_rw(u) += sw;	/* increment to include self edges */
            }
            v = rank[i].v[j + 1];
            if (v) {
                width = ND_rw(u) + ND_lw(v) + nodesep;
                e0 = make_aux_edge(u, v, width, 0);
                last = (ND_rank(v) = last + width);
            }

            /* constraints from labels of flat edges on previous rank */
            if ((e = ND_alg(u))) {
                e0 = ND_save_out(u).list[0];
                e1 = ND_save_out(u).list[1];
                if (ND_order(aghead(e0)) > ND_order(aghead(e1))) {
                    ff = e0;
                    e0 = e1;
                    e1 = ff;
                }
                m0 = ED_minlen(e) * GD_nodesep(g) / 2;
                double m1 = m0 + ND_rw(aghead(e0)) + ND_lw(agtail(e0));
                /* these guards are needed because the flat edges
                 * work very poorly with cluster layout */
                if (!canreach(agtail(e0), aghead(e0)))
                    make_aux_edge(aghead(e0), agtail(e0), m1,
                                  ED_weight(e));
                m1 = m0 + ND_rw(agtail(e1)) + ND_lw(aghead(e1));
                if (!canreach(aghead(e1), agtail(e1)))
                    make_aux_edge(agtail(e1), aghead(e1), m1,
                                  ED_weight(e));
            }

            /* position flat edge endpoints */
            for (size_t k = 0; k < ND_flat_out(u).size; k++) {
                e = ND_flat_out(u).list[k];
                if (ND_order(agtail(e)) < ND_order(aghead(e))) {
                    t0 = agtail(e);
                    h0 = aghead(e);
                } else {
                    t0 = aghead(e);
                    h0 = agtail(e);
                }
                width = ND_rw(t0) + ND_lw(h0);
                m0 = ED_minlen(e) * GD_nodesep(g) + width;
                if ((e0 = find_fast_edge(t0, h0))) {
                    /* flat edge between adjacent neighbors
                     * ED_dist contains the largest label width.
                     */
                    m0 = MAX(m0, width + GD_nodesep(g) + ROUND(ED_dist(e)));
                    ED_minlen(e0) = MAX(ED_minlen(e0), m0);
                    ED_weight(e0) = MAX(ED_weight(e0), ED_weight(e));
                }
                else if (!ED_label(e)) {
                    /* unlabeled flat edge between non-neighbors
                     * ED_minlen(e) is max of ED_minlen of all equivalent
                     * edges.
                     */
                    make_aux_edge(t0, h0, m0, ED_weight(e));
                }
                /* labeled flat edges between non-neighbors have already
                 * been constrained by the label above.
                 */
            }
        }
    }
}
/// make virtual edge pairs corresponding to input edges
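/// Each input edge (t,h) is modeled by a slack node sn below both
/// endpoints with aux edges sn -> t and sn -> h; the minimum lengths
/// m0 + 1 and m1 + 1 encode the horizontal offset between the edge's
/// tail and head ports, so minimizing the weighted aux edge lengths
/// tends to place the ports in vertical alignment.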
static void make_edge_pairs(graph_t * g)
{
    int i, m0, m1;
    node_t *n, *sn;
    edge_t *e;

    for (n = GD_nlist(g); n; n = ND_next(n)) {
        if (ND_save_out(n).list)
            for (i = 0; (e = ND_save_out(n).list[i]); i++) {
                sn = virtual_node(g);
                ND_node_type(sn) = SLACKNODE;
                m0 = (ED_head_port(e).p.x - ED_tail_port(e).p.x);
                if (m0 > 0)
                    m1 = 0;
                else {
                    m1 = -m0;
                    m0 = 0;
                }
                make_aux_edge(sn, agtail(e), m0 + 1, ED_weight(e));
                make_aux_edge(sn, aghead(e), m1 + 1, ED_weight(e));
                ND_rank(sn) =
                    MIN(ND_rank(agtail(e)) - m0 - 1,
                        ND_rank(aghead(e)) - m1 - 1);
            }
    }
}
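
/* contain_clustnodes:
 * Recursively add (or reinforce) a heavily weighted compaction edge
 * ln -> rn in every cluster, so network simplex pulls each cluster's
 * bounding sides together.
 */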
static void contain_clustnodes(graph_t * g)
{
    int c;
    edge_t *e;

    if (g != dot_root(g)) {
        contain_nodes(g);
        if ((e = find_fast_edge(GD_ln(g),GD_rn(g))))	/* maybe from lrvn()?*/
            ED_weight(e) += 128;
        else
            make_aux_edge(GD_ln(g), GD_rn(g), 1, 128);	/* clust compaction edge */
    }
    for (c = 1; c <= GD_n_cluster(g); c++)
        contain_clustnodes(GD_clust(g)[c]);
}
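
/* vnode_not_related_to:
 * True if v is a virtual node whose chain of original edges has neither
 * endpoint inside cluster g, i.e. v genuinely belongs outside g.
 */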
static bool vnode_not_related_to(graph_t *g, node_t *v) {
    edge_t *e;

    if (ND_node_type(v) != VIRTUAL)
        return false;
    for (e = ND_save_out(v).list[0]; ED_to_orig(e); e = ED_to_orig(e));
    if (agcontains(g, agtail(e)))
        return false;
    if (agcontains(g, aghead(e)))
        return false;
    return true;
}
/* Guarantee nodes outside the cluster g are placed outside of it.
 * This is done by adding constraints to make sure such nodes have
 * a gap of margin from the left or right bounding box node ln or rn.
 *
 * We could probably reduce some of these constraints by checking if
 * the node is in a cluster, since elsewhere we already constrain the
 * separation between clusters. Also, we should be able to skip the
 * first loop if g is the root graph.
 */
static void keepout_othernodes(graph_t * g)
{
    int i, c, r, margin;
    node_t *u, *v;

    margin = late_int (g, G_margin, CL_OFFSET, 0);
    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        if (GD_rank(g)[r].n == 0)
            continue;
        v = GD_rank(g)[r].v[0];
        if (v == NULL)
            continue;
        for (i = ND_order(v) - 1; i >= 0; i--) {
            u = GD_rank(dot_root(g))[r].v[i];
            /* can't use "is_a_vnode_of" because elists are swapped */
            if (ND_node_type(u) == NORMAL || vnode_not_related_to(g, u)) {
                make_aux_edge(u, GD_ln(g), margin + ND_rw(u), 0);
                break;
            }
        }
        for (i = ND_order(v) + GD_rank(g)[r].n; i < GD_rank(dot_root(g))[r].n;
             i++) {
            u = GD_rank(dot_root(g))[r].v[i];
            if (ND_node_type(u) == NORMAL || vnode_not_related_to(g, u)) {
                make_aux_edge(GD_rn(g), u, margin + ND_lw(u), 0);
                break;
            }
        }
    }
    for (c = 1; c <= GD_n_cluster(g); c++)
        keepout_othernodes(GD_clust(g)[c]);
}
/* Make sure boxes of subclusters of g are offset from the
 * box of g. This is done by a constraint between the left and
 * right bounding box nodes ln and rn of g and a subcluster.
 * The gap needs to include any left or right labels.
 */
static void contain_subclust(graph_t * g)
{
    int margin, c;
    graph_t *subg;

    margin = late_int (g, G_margin, CL_OFFSET, 0);
    make_lrvn(g);
    for (c = 1; c <= GD_n_cluster(g); c++) {
        subg = GD_clust(g)[c];
        make_lrvn(subg);
        make_aux_edge(GD_ln(g), GD_ln(subg),
                      margin + GD_border(g)[LEFT_IX].x, 0);
        make_aux_edge(GD_rn(subg), GD_rn(g),
                      margin + GD_border(g)[RIGHT_IX].x, 0);
        contain_subclust(subg);
    }
}
/* Guarantee space between subclusters of g.
 * This is done by adding a constraint between the right bbox node rn
 * of the left cluster and the left bbox node ln of the right cluster.
 * This is only done if the two clusters overlap in some rank.
 */
static void separate_subclust(graph_t * g)
{
    int i, j, margin;
    graph_t *low, *high;
    graph_t *left, *right;

    margin = late_int (g, G_margin, CL_OFFSET, 0);
    for (i = 1; i <= GD_n_cluster(g); i++)
        make_lrvn(GD_clust(g)[i]);
    for (i = 1; i <= GD_n_cluster(g); i++) {
        for (j = i + 1; j <= GD_n_cluster(g); j++) {
            low = GD_clust(g)[i];
            high = GD_clust(g)[j];
            if (GD_minrank(low) > GD_minrank(high)) {
                graph_t *temp = low;
                low = high;
                high = temp;
            }
            if (GD_maxrank(low) < GD_minrank(high))
                continue;
            if (ND_order(GD_rank(low)[GD_minrank(high)].v[0])
                < ND_order(GD_rank(high)[GD_minrank(high)].v[0])) {
                left = low;
                right = high;
            } else {
                left = high;
                right = low;
            }
            make_aux_edge(GD_rn(left), GD_ln(right), margin, 0);
        }
        separate_subclust(GD_clust(g)[i]);
    }
}
/* create constraints for:
 * node containment in clusters,
 * cluster containment in clusters,
 * separation of sibling clusters.
 */
static void pos_clusters(graph_t * g)
{
    if (GD_n_cluster(g) > 0) {
        contain_clustnodes(g);
        keepout_othernodes(g);
        contain_subclust(g);
        separate_subclust(g);
    }
}
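
/* compress_graph:
 * For ratio=compress with an explicit size, add a high-weight aux edge
 * ln -> rn spanning the whole drawing; its length is the target width
 * (height when rankdir is flipped), encouraging network simplex to
 * squeeze the layout toward that extent.
 */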
static void compress_graph(graph_t * g)
{
    double x;
    pointf p;

    if (GD_drawing(g)->ratio_kind != R_COMPRESS)
        return;
    p = GD_drawing(g)->size;
    if (p.x * p.y <= 1)
        return;
    contain_nodes(g);
    if (!GD_flip(g))
        x = p.x;
    else
        x = p.y;

    /* Guard against huge size attribute since max. edge length is USHRT_MAX
     * A warning might be called for. Also, one could check that the graph
     * already fits GD_drawing(g)->size and return immediately.
     */
    x = MIN(x,USHRT_MAX);
    make_aux_edge(GD_ln(g), GD_rn(g), x, 1000);
}
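
/* create_aux_edges:
 * Build the full auxiliary constraint graph: per-rank left-to-right
 * ordering, edge pairs for port alignment, cluster containment and
 * separation, and optional compression.
 */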
static void create_aux_edges(graph_t * g)
{
    allocate_aux_edges(g);
    make_LR_constraints(g);
    make_edge_pairs(g);
    pos_clusters(g);
    compress_graph(g);
}
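
/* remove_aux_edges:
 * Free the auxiliary edges and restore each node's saved in/out lists,
 * then unlink and free all slack nodes from the node list.
 */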
static void remove_aux_edges(graph_t * g)
{
    int i;
    node_t *n, *nnext, *nprev;
    edge_t *e;

    for (n = GD_nlist(g); n; n = ND_next(n)) {
        for (i = 0; (e = ND_out(n).list[i]); i++) {
            free(e->base.data);
            free(e);
        }
        free_list(ND_out(n));
        free_list(ND_in(n));
        ND_out(n) = ND_save_out(n);
        ND_in(n) = ND_save_in(n);
    }
    /* cannot be merged with previous loop */
    nprev = NULL;
    for (n = GD_nlist(g); n; n = nnext) {
        nnext = ND_next(n);
        if (ND_node_type(n) == SLACKNODE) {
            if (nprev)
                ND_next(nprev) = nnext;
            else
                GD_nlist(g) = nnext;
            free(n->base.data);
            free(n);
        } else
            nprev = n;
    }
    ND_prev(GD_nlist(g)) = NULL;
}
/// Set x coords of nodes.
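/// After the second network simplex pass, ND_rank(v) holds the x
/// coordinate computed on the auxiliary graph; copy it into
/// ND_coord(v).x and restore ND_rank(v) to the true rank index.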
static void
set_xcoords(graph_t * g)
{
    int i, j;
    node_t *v;
    rank_t *rank = GD_rank(g);

    for (i = GD_minrank(g); i <= GD_maxrank(g); i++) {
        for (j = 0; j < rank[i].n; j++) {
            v = rank[i].v[j];
            ND_coord(v).x = ND_rank(v);
            ND_rank(v) = i;
        }
    }
}
/* Expand cluster height by delta, adding half to top
 * and half to bottom. If the bottom expansion exceeds the
 * ht1 of the rank, shift the ranks in the cluster up.
 * If the top expansion, including any shift from the bottom
 * expansion, exceeds the ht2 of the rank, shift the ranks above
 * the cluster up.
 *
 * FIX: There can be excess space between ranks. Not sure where this is
 * coming from but it could be cleaned up.
 */
static void adjustSimple(graph_t *g, double delta, int margin_total) {
    int r;
    double deltop;
    graph_t *root = dot_root(g);
    rank_t *rank = GD_rank(root);
    int maxr = GD_maxrank(g);
    int minr = GD_minrank(g);

    const double bottom = (delta + 1) / 2;
    const double delbottom = GD_ht1(g) + bottom - (rank[maxr].ht1 - margin_total);
    if (delbottom > 0) {
        for (r = maxr; r >= minr; r--) {
            if (rank[r].n > 0)
                ND_coord(rank[r].v[0]).y += delbottom;
        }
        deltop = GD_ht2(g) + (delta-bottom) + delbottom - (rank[minr].ht2 - margin_total);
    }
    else
        deltop = GD_ht2(g) + (delta-bottom) - (rank[minr].ht2 - margin_total);
    if (deltop > 0) {
        for (r = minr-1; r >= GD_minrank(root); r--) {
            if (rank[r].n > 0)
                ND_coord(rank[r].v[0]).y += deltop;
        }
    }
    GD_ht2(g) += delta - bottom;
    GD_ht1(g) += bottom;
}
/* Recursively adjust ranks to take into account
 * wide cluster labels when rankdir=LR.
 * We divide the extra space between the top and bottom.
 * Adjust the ht1 and ht2 values in the process.
 */
static void adjustRanks(graph_t * g, int margin_total)
{
    double lht;			/* label height */
    double rht;			/* height between top and bottom ranks */
    int maxr, minr, margin;
    int c;
    double delta, ht1, ht2;
    rank_t *rank = GD_rank(dot_root(g));

    if (g == dot_root(g))
        margin = 0;
    else
        margin = late_int (g, G_margin, CL_OFFSET, 0);

    ht1 = GD_ht1(g);
    ht2 = GD_ht2(g);

    for (c = 1; c <= GD_n_cluster(g); c++) {
        graph_t *subg = GD_clust(g)[c];
        adjustRanks(subg, margin+margin_total);
        if (GD_maxrank(subg) == GD_maxrank(g))
            ht1 = fmax(ht1, GD_ht1(subg) + margin);
        if (GD_minrank(subg) == GD_minrank(g))
            ht2 = fmax(ht2, GD_ht2(subg) + margin);
    }

    GD_ht1(g) = ht1;
    GD_ht2(g) = ht2;

    if (g != dot_root(g) && GD_label(g)) {
        lht = MAX(GD_border(g)[LEFT_IX].y, GD_border(g)[RIGHT_IX].y);
        maxr = GD_maxrank(g);
        minr = GD_minrank(g);
        rht = ND_coord(rank[minr].v[0]).y - ND_coord(rank[maxr].v[0]).y;
        delta = lht - (rht + ht1 + ht2);
        if (delta > 0) {
            adjustSimple(g, delta, margin_total);
        }
    }

    /* update the global ranks */
    if (g != dot_root(g)) {
        rank[GD_minrank(g)].ht2 = fmax(rank[GD_minrank(g)].ht2, GD_ht2(g));
        rank[GD_maxrank(g)].ht1 = fmax(rank[GD_maxrank(g)].ht1, GD_ht1(g));
    }
}
/* recursively compute cluster ht requirements. assumes GD_ht1(subg) and ht2
 * are computed from primitive nodes only. updates ht1 and ht2 to reflect
 * cluster nesting and labels. also maintains global rank ht1 and ht2.
 * Return true if some cluster has a label.
 */
static int clust_ht(Agraph_t * g)
{
    int c;
    double ht1, ht2;
    graph_t *subg;
    rank_t *rank = GD_rank(dot_root(g));
    int margin, haveClustLabel = 0;

    if (g == dot_root(g))
        margin = CL_OFFSET;
    else
        margin = late_int (g, G_margin, CL_OFFSET, 0);

    ht1 = GD_ht1(g);
    ht2 = GD_ht2(g);

    /* account for sub-clusters */
    for (c = 1; c <= GD_n_cluster(g); c++) {
        subg = GD_clust(g)[c];
        haveClustLabel |= clust_ht(subg);
        if (GD_maxrank(subg) == GD_maxrank(g))
            ht1 = MAX(ht1, GD_ht1(subg) + margin);
        if (GD_minrank(subg) == GD_minrank(g))
            ht2 = MAX(ht2, GD_ht2(subg) + margin);
    }

    /* account for a possible cluster label in clusters */
    /* room for root graph label is handled in dotneato_postprocess */
    if ((g != dot_root(g)) && GD_label(g)) {
        haveClustLabel = 1;
        if (!GD_flip(agroot(g))) {
            ht1 += GD_border(g)[BOTTOM_IX].y;
            ht2 += GD_border(g)[TOP_IX].y;
        }
    }
    GD_ht1(g) = ht1;
    GD_ht2(g) = ht2;

    /* update the global ranks */
    if (g != dot_root(g)) {
        rank[GD_minrank(g)].ht2 = MAX(rank[GD_minrank(g)].ht2, ht2);
        rank[GD_maxrank(g)].ht1 = MAX(rank[GD_maxrank(g)].ht1, ht1);
    }

    return haveClustLabel;
}
/* set y coordinates of nodes, a rank at a time */
static void set_ycoords(graph_t * g)
{
    int i, j, r;
    double ht2, maxht, delta, d0, d1;
    node_t *n;
    edge_t *e;
    rank_t *rank = GD_rank(g);
    graph_t *clust;
    int lbl;

    ht2 = maxht = 0;

    /* scan ranks for tallest nodes. */
    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        for (i = 0; i < rank[r].n; i++) {
            n = rank[r].v[i];

            /* assumes symmetry, ht1 = ht2 */
            ht2 = ND_ht(n) / 2;

            /* have to look for high self-edge labels, too */
            if (ND_other(n).list)
                for (j = 0; (e = ND_other(n).list[j]); j++) {
                    if (agtail(e) == aghead(e)) {
                        if (ED_label(e))
                            ht2 = fmax(ht2, ED_label(e)->dimen.y / 2);
                    }
                }

            /* update global rank ht */
            if (rank[r].pht2 < ht2)
                rank[r].pht2 = rank[r].ht2 = ht2;
            if (rank[r].pht1 < ht2)
                rank[r].pht1 = rank[r].ht1 = ht2;

            /* update nearest enclosing cluster rank ht */
            if ((clust = ND_clust(n))) {
                int yoff = (clust == g ? 0 : late_int (clust, G_margin, CL_OFFSET, 0));
                if (ND_rank(n) == GD_minrank(clust))
                    GD_ht2(clust) = fmax(GD_ht2(clust), ht2 + yoff);
                if (ND_rank(n) == GD_maxrank(clust))
                    GD_ht1(clust) = fmax(GD_ht1(clust), ht2 + yoff);
            }
        }
    }

    /* scan sub-clusters */
    lbl = clust_ht(g);

    /* make the initial assignment of ycoords to leftmost nodes by ranks */
    maxht = 0;
    r = GD_maxrank(g);
    (ND_coord(rank[r].v[0])).y = rank[r].ht1;
    while (--r >= GD_minrank(g)) {
        d0 = rank[r + 1].pht2 + rank[r].pht1 + GD_ranksep(g);	/* prim node sep */
        d1 = rank[r + 1].ht2 + rank[r].ht1 + CL_OFFSET;	/* cluster sep */
        delta = fmax(d0, d1);
        if (rank[r].n > 0)	/* this may reflect some problem */
            (ND_coord(rank[r].v[0])).y = (ND_coord(rank[r + 1].v[0])).y + delta;
#ifdef DEBUG
        else
            fprintf(stderr, "dot set_ycoords: rank %d is empty\n", r);
#endif
        maxht = fmax(maxht, delta);
    }

    /* If there are cluster labels and the drawing is rotated, we need special processing to
     * allocate enough room. We use adjustRanks for this, and then recompute the maxht if
     * the ranks are to be equally spaced. This seems simpler and appears to work better than
     * handling equal spacing as a special case.
     */
    if (lbl && GD_flip(g)) {
        adjustRanks(g, 0);
        if (GD_exact_ranksep(g)) {	/* recompute maxht */
            maxht = 0;
            r = GD_maxrank(g);
            d0 = (ND_coord(rank[r].v[0])).y;
            while (--r >= GD_minrank(g)) {
                d1 = (ND_coord(rank[r].v[0])).y;
                delta = d1 - d0;
                maxht = fmax(maxht, delta);
                d0 = d1;
            }
        }
    }

    /* re-assign if ranks are equally spaced */
    if (GD_exact_ranksep(g)) {
        for (r = GD_maxrank(g) - 1; r >= GD_minrank(g); r--)
            if (rank[r].n > 0)	/* this may reflect the same problem :-() */
                (ND_coord(rank[r].v[0])).y=
                    (ND_coord(rank[r + 1].v[0])).y + maxht;
    }

    /* copy ycoord assignment from leftmost nodes to others */
    for (n = GD_nlist(g); n; n = ND_next(n))
        ND_coord(n).y = (ND_coord(rank[ND_rank(n)].v[0])).y;
}
/* Compute bounding box of g.
 * The x limits of clusters are given by the x positions of ln and rn.
 * This information is stored in the rank field, since it was calculated
 * using network simplex.
 * For the root graph, we don't enforce all the constraints on ln and
 * rn, so we traverse the nodes and subclusters.
 */
static void dot_compute_bb(graph_t * g, graph_t * root)
{
    int r, c;
    double x, offset;
    node_t *v;
    pointf LL, UR;

    if (g == dot_root(g)) {
        LL.x = (double)INT_MAX;
        UR.x = (double)-INT_MAX;
        for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
            int rnkn = GD_rank(g)[r].n;
            if (rnkn == 0)
                continue;
            if ((v = GD_rank(g)[r].v[0]) == NULL)
                continue;
            for (c = 1; (ND_node_type(v) != NORMAL) && c < rnkn; c++)
                v = GD_rank(g)[r].v[c];
            if (ND_node_type(v) == NORMAL) {
                x = ND_coord(v).x - ND_lw(v);
                LL.x = MIN(LL.x, x);
            }
            else continue;
            /* At this point, we know the rank contains a NORMAL node */
            v = GD_rank(g)[r].v[rnkn - 1];
            for (c = rnkn-2; ND_node_type(v) != NORMAL; c--)
                v = GD_rank(g)[r].v[c];
            x = ND_coord(v).x + ND_rw(v);
            UR.x = MAX(UR.x, x);
        }
        offset = CL_OFFSET;
        for (c = 1; c <= GD_n_cluster(g); c++) {
            x = (double)(GD_bb(GD_clust(g)[c]).LL.x - offset);
            LL.x = MIN(LL.x, x);
            x = (double)(GD_bb(GD_clust(g)[c]).UR.x + offset);
            UR.x = MAX(UR.x, x);
        }
    } else {
        LL.x = (double)(ND_rank(GD_ln(g)));
        UR.x = (double)(ND_rank(GD_rn(g)));
    }
    LL.y = ND_coord(GD_rank(root)[GD_maxrank(g)].v[0]).y - GD_ht1(g);
    UR.y = ND_coord(GD_rank(root)[GD_minrank(g)].v[0]).y + GD_ht2(g);
    GD_bb(g).LL = LL;
    GD_bb(g).UR = UR;
}
static void rec_bb(graph_t * g, graph_t * root)
{
    int c;
    for (c = 1; c <= GD_n_cluster(g); c++)
        rec_bb(GD_clust(g)[c], root);
    dot_compute_bb(g, root);
}
/* Recursively rescale all bounding boxes using scale factors
 * xf and yf. We assume all the bboxes have been computed.
 */
static void scale_bb(graph_t *g, double xf, double yf) {
    int c;
    for (c = 1; c <= GD_n_cluster(g); c++)
        scale_bb(GD_clust(g)[c], xf, yf);
    GD_bb(g).LL.x *= xf;
    GD_bb(g).LL.y *= yf;
    GD_bb(g).UR.x *= xf;
    GD_bb(g).UR.y *= yf;
}
/* Set bounding boxes and, if ratio is set, rescale graph.
 * Note that if some dimension shrinks, there may be problems
 * with labels.
 */
static void set_aspect(graph_t *g) {
    double xf = 0.0, yf = 0.0, actual, desired;
    node_t *n;
    bool filled;

    rec_bb(g, g);
    if (GD_maxrank(g) > 0 && GD_drawing(g)->ratio_kind) {
        pointf sz = sub_pointf(GD_bb(g).UR, GD_bb(g).LL); // normalize
        if (GD_flip(g)) {
            sz = exch_xyf(sz);
        }
        bool scale_it = true;
        if (GD_drawing(g)->ratio_kind == R_AUTO)
            filled = idealsize(g, .5);
        else
            filled = GD_drawing(g)->ratio_kind == R_FILL;
        if (filled) {
            /* fill is weird because both X and Y can stretch */
            if (GD_drawing(g)->size.x <= 0)
                scale_it = false;
            else {
                xf = GD_drawing(g)->size.x / sz.x;
                yf = GD_drawing(g)->size.y / sz.y;
                if (xf < 1.0 || yf < 1.0) {
                    if (xf < yf) {
                        yf /= xf;
                        xf = 1.0;
                    } else {
                        xf /= yf;
                        yf = 1.0;
                    }
                }
            }
        } else if (GD_drawing(g)->ratio_kind == R_EXPAND) {
            if (GD_drawing(g)->size.x <= 0)
                scale_it = false;
            else {
                xf = GD_drawing(g)->size.x / GD_bb(g).UR.x;
                yf = GD_drawing(g)->size.y / GD_bb(g).UR.y;
                if (xf > 1.0 && yf > 1.0) {
                    const double scale = fmin(xf, yf);
                    xf = yf = scale;
                } else
                    scale_it = false;
            }
        } else if (GD_drawing(g)->ratio_kind == R_VALUE) {
            desired = GD_drawing(g)->ratio;
            actual = sz.y / sz.x;
            if (actual < desired) {
                yf = desired / actual;
                xf = 1.0;
            } else {
                xf = actual / desired;
                yf = 1.0;
            }
        } else
            scale_it = false;
        if (scale_it) {
            if (GD_flip(g)) {
                double t = xf;
                xf = yf;
                yf = t;
            }
            for (n = GD_nlist(g); n; n = ND_next(n)) {
                ND_coord(n).x = round(ND_coord(n).x * xf);
                ND_coord(n).y = round(ND_coord(n).y * yf);
            }
            scale_bb(g, xf, yf);
        }
    }
}
/* make space for the leaf nodes of each rank */
static void make_leafslots(graph_t * g)
{
    int i, j, r;
    node_t *v;

    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        j = 0;
        for (i = 0; i < GD_rank(g)[r].n; i++) {
            v = GD_rank(g)[r].v[i];
            ND_order(v) = j;
            if (ND_ranktype(v) == LEAFSET)
                j = j + ND_UF_size(v);
            else
                j++;
        }
        if (j <= GD_rank(g)[r].n)
            continue;
        node_t **new_v = gv_calloc(j + 1, sizeof(node_t*));
        for (i = GD_rank(g)[r].n - 1; i >= 0; i--) {
            v = GD_rank(g)[r].v[i];
            new_v[ND_order(v)] = v;
        }
        GD_rank(g)[r].n = j;
        new_v[j] = NULL;
        free(GD_rank(g)[r].v);
        GD_rank(g)[r].v = new_v;
    }
}
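
/* ports_eq:
 * True if e and f agree on their defined head and tail port positions,
 * so the two edges can share the placement of a merged representative.
 */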
int ports_eq(edge_t * e, edge_t * f)
{
    return ED_head_port(e).defined == ED_head_port(f).defined
        && ((ED_head_port(e).p.x == ED_head_port(f).p.x &&
             ED_head_port(e).p.y == ED_head_port(f).p.y)
            || !ED_head_port(e).defined)
        && ((ED_tail_port(e).p.x == ED_tail_port(f).p.x &&
             ED_tail_port(e).p.y == ED_tail_port(f).p.y)
            || !ED_tail_port(e).defined);
}
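
/* expand_leaves:
 * Give the members of each leaf set their own slots (make_leafslots),
 * then pull any merged edge whose ports differ from its original back
 * out of the "other" list and, if it spans a single rank, reinsert it
 * into the fast graph.
 */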
static void expand_leaves(graph_t * g)
{
    int i, d;
    node_t *n;
    edge_t *e, *f;

    make_leafslots(g);
    for (n = GD_nlist(g); n; n = ND_next(n)) {
        if (ND_other(n).list)
            for (i = 0; (e = ND_other(n).list[i]); i++) {
                if ((d = ND_rank(aghead(e)) - ND_rank(agtail(e))) == 0)
                    continue;
                f = ED_to_orig(e);
                if (!ports_eq(e, f)) {
                    zapinlist(&(ND_other(n)), e);
                    if (d == 1)
                        fast_edge(e);
                    /*else unitize(e); ### */
                    i--;
                }
            }
    }
}
/* Add left and right slacknodes to a cluster which
 * are used in the LP to constrain nodes not in g but
 * sharing its ranks to be to the left or right of g
 * by a specified amount.
 * The slacknodes ln and rn give the x position of the
 * left and right side of the cluster's bounding box. In
 * particular, any cluster labels on the left or right side
 * are inside.
 * If a cluster has a label, and we have rankdir!=LR, we make
 * sure the cluster is wide enough for the label. Note that
 * if the label is wider than the cluster, the nodes in the
 * cluster may not be centered.
 */
static void make_lrvn(graph_t * g)
{
    node_t *ln, *rn;

    if (GD_ln(g))
        return;
    ln = virtual_node(dot_root(g));
    ND_node_type(ln) = SLACKNODE;
    rn = virtual_node(dot_root(g));
    ND_node_type(rn) = SLACKNODE;

    if (GD_label(g) && g != dot_root(g) && !GD_flip(agroot(g))) {
        int w = MAX(GD_border(g)[BOTTOM_IX].x, GD_border(g)[TOP_IX].x);
        make_aux_edge(ln, rn, w, 0);
    }
    GD_ln(g) = ln;
    GD_rn(g) = rn;
}
/* make left and right bounding box virtual nodes ln and rn
 * constrain interior nodes
 */
static void contain_nodes(graph_t * g)
{
    int margin, r;
    node_t *ln, *rn, *v;

    margin = late_int (g, G_margin, CL_OFFSET, 0);
    make_lrvn(g);
    ln = GD_ln(g);
    rn = GD_rn(g);
    for (r = GD_minrank(g); r <= GD_maxrank(g); r++) {
        if (GD_rank(g)[r].n == 0)
            continue;
        v = GD_rank(g)[r].v[0];
        if (v == NULL) {
            agerrorf("contain_nodes clust %s rank %d missing node\n",
                     agnameof(g), r);
            continue;
        }
        make_aux_edge(ln, v,
                      ND_lw(v) + margin + GD_border(g)[LEFT_IX].x, 0);
        v = GD_rank(g)[r].v[GD_rank(g)[r].n - 1];
        make_aux_edge(v, rn,
                      ND_rw(v) + margin + GD_border(g)[RIGHT_IX].x, 0);
    }
}
/* set g->drawing->size to a reasonable default.
 * returns a boolean to indicate if drawing is to
 * be scaled and filled */
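/* The scale factor is first clamped to at least minallowed, then rounded
 * up per axis so the scaled drawing covers a whole number of pages.
 * Illustrative numbers (with minallowed = .5): usable page 8x10, bb 20x15.
 * xf = 0.4, yf = 0.67, so f = 0.4 is clamped to 0.5. x needs
 * ceil(0.5*20/8) = 2 pages, giving xf = 0.8; y needs ceil(0.5*15/10) = 1
 * page, giving yf = 0.67. The resulting size is 16x10: exactly 2 pages
 * wide and 1 page high.
 */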
static bool idealsize(graph_t * g, double minallowed)
{
    double xf, yf, f, R;
    pointf b, relpage, margin;

    /* try for one page */
    relpage = GD_drawing(g)->page;
    if (relpage.x < 0.001 || relpage.y < 0.001)
        return false;		/* no page was specified */
    margin = GD_drawing(g)->margin;
    relpage = sub_pointf(relpage, margin);
    relpage = sub_pointf(relpage, margin);
    b.x = GD_bb(g).UR.x;
    b.y = GD_bb(g).UR.y;
    xf = relpage.x / b.x;
    yf = relpage.y / b.y;
    if (xf >= 1.0 && yf >= 1.0)
        return false;		/* fits on one page */
    f = MIN(xf, yf);
    xf = yf = MAX(f, minallowed);
    R = ceil(xf * b.x / relpage.x);
    xf = R * relpage.x / b.x;
    R = ceil(yf * b.y / relpage.y);
    yf = R * relpage.y / b.y;
    GD_drawing(g)->size.x = b.x * xf;
    GD_drawing(g)->size.y = b.y * yf;
    return true;
}