/* smart_ini_x.c */
  1. /*************************************************************************
  2. * Copyright (c) 2011 AT&T Intellectual Property
  3. * All rights reserved. This program and the accompanying materials
  4. * are made available under the terms of the Eclipse Public License v1.0
  5. * which accompanies this distribution, and is available at
  6. * https://www.eclipse.org/legal/epl-v10.html
  7. *
  8. * Contributors: Details at https://graphviz.org
  9. *************************************************************************/
  10. #include <float.h>
  11. #include <math.h>
  12. #include <neatogen/digcola.h>
  13. #include <util/alloc.h>
  14. #ifdef DIGCOLA
  15. #include <neatogen/kkutils.h>
  16. #include <neatogen/matrix_ops.h>
  17. #include <neatogen/conjgrad.h>
  18. #include <stdbool.h>
  19. static void
  20. standardize(double* orthog, int nvtxs)
  21. {
  22. double len, avg = 0;
  23. int i;
  24. for (i=0; i<nvtxs; i++)
  25. avg+=orthog[i];
  26. avg/=nvtxs;
  27. /* centralize: */
  28. for (i=0; i<nvtxs; i++)
  29. orthog[i]-=avg;
  30. /* normalize: */
  31. len = norm(orthog, nvtxs-1);
  32. // if we have a degenerate length, do not attempt to scale by it
  33. if (fabs(len) < DBL_EPSILON) {
  34. return;
  35. }
  36. vectors_scalar_mult(nvtxs, orthog, 1.0 / len, orthog);
  37. }
  38. static void
  39. mat_mult_vec_orthog(float** mat, int dim1, int dim2, double* vec,
  40. double* result, double* orthog)
  41. {
  42. /* computes mat*vec, where mat is a dim1*dim2 matrix */
  43. int i,j;
  44. double sum;
  45. for (i=0; i<dim1; i++) {
  46. sum=0;
  47. for (j=0; j<dim2; j++) {
  48. sum += mat[i][j]*vec[j];
  49. }
  50. result[i]=sum;
  51. }
  52. assert(orthog != NULL);
  53. double alpha = -vectors_inner_product(dim1, result, orthog);
  54. scadd(result, dim1 - 1, alpha, orthog);
  55. }
/* power_iteration_orthog: compute up to 'neigs' leading eigenpairs of
 * square_mat restricted to the subspace orthogonal to 'orthog'.
 * Eigenvectors are written into eigs[0..neigs-1] (caller-allocated, length n
 * each) and eigenvalue estimates into evals[0..neigs-1]. On return the pairs
 * are sorted by descending eigenvalue. 'orthog' is assumed unit-length
 * (callers pass it through standardize()) — TODO confirm.
 */
static void
power_iteration_orthog(float** square_mat, int n, int neigs,
    double** eigs, double* evals, double* orthog, double p_iteration_threshold)
{
    // Power-Iteration with
    // (I - orthog × orthogᵀ) × square_mat × (I - orthog × orthogᵀ)
    int i,j;
    double *tmp_vec = gv_calloc(n, sizeof(double));
    double *last_vec = gv_calloc(n, sizeof(double));
    double *curr_vector;
    double len;
    double angle;
    double alpha;
    int largest_index;
    double largest_eval;
    /* convergence when |<curr, last>| >= tol, i.e. consecutive iterates
     * nearly parallel */
    double tol=1-p_iteration_threshold;

    if (neigs>=n) {
        neigs=n;
    }

    for (i=0; i<neigs; i++) {
        curr_vector = eigs[i];
        /* guess the i-th eigen vector */
    choose:
        for (j=0; j<n; j++) {
            curr_vector[j] = rand()%100;
        }
        assert(orthog != NULL);
        /* project the fixed direction 'orthog' out of the guess */
        alpha = -vectors_inner_product(n, orthog, curr_vector);
        scadd(curr_vector, n - 1, alpha, orthog);
        // orthogonalize against higher eigenvectors
        for (j=0; j<i; j++) {
            alpha = -vectors_inner_product(n, eigs[j], curr_vector);
            scadd(curr_vector, n-1, alpha, eigs[j]);
        }
        len = norm(curr_vector, n-1);
        if (len<1e-10) {
            /* We have chosen a vector colinear with previous ones;
             * retry with a fresh random guess */
            goto choose;
        }
        vectors_scalar_mult(n, curr_vector, 1.0 / len, curr_vector);

        /* power iteration: repeatedly multiply, re-orthogonalize and
         * renormalize until the direction stops changing */
        do {
            copy_vector(n, curr_vector, last_vec);
            mat_mult_vec_orthog(square_mat,n,n,curr_vector,tmp_vec,orthog);
            copy_vector(n, tmp_vec, curr_vector);
            /* orthogonalize against higher eigenvectors */
            for (j=0; j<i; j++) {
                alpha = -vectors_inner_product(n, eigs[j], curr_vector);
                scadd(curr_vector, n-1, alpha, eigs[j]);
            }
            len = norm(curr_vector, n-1);
            if (len<1e-10) {
                /* We have reached the null space (e.vec. associated
                 * with e.val. 0)
                 */
                goto exit;
            }
            vectors_scalar_mult(n, curr_vector, 1.0 / len, curr_vector);
            angle = vectors_inner_product(n, curr_vector, last_vec);
        } while (fabs(angle)<tol);
        /* the Rayleigh quotient (up to errors due to orthogonalization):
         * u*(A*u)/||A*u||)*||A*u||, where u=last_vec, and ||u||=1
         */
        evals[i]=angle*len;
    }
exit:
    /* only reached with i < neigs when the null space was hit above;
     * fill the remaining slots with orthogonalized random unit vectors */
    for (; i<neigs; i++) {
        /* compute the smallest eigenvector, which are
         * probably associated with eigenvalue 0 and for
         * which power-iteration is dangerous
         */
        curr_vector = eigs[i];
        /* guess the i-th eigen vector */
        for (j=0; j<n; j++)
            curr_vector[j] = rand()%100;
        /* orthogonalize against higher eigenvectors */
        for (j=0; j<i; j++) {
            alpha = -vectors_inner_product(n, eigs[j], curr_vector);
            scadd(curr_vector, n-1, alpha, eigs[j]);
        }
        len = norm(curr_vector, n-1);
        vectors_scalar_mult(n, curr_vector, 1.0 / len, curr_vector);
        evals[i]=0;
    }

    /* sort vectors by their evals, for overcoming possible mis-convergence: */
    /* selection sort, descending by eigenvalue */
    for (i=0; i<neigs-1; i++) {
        largest_index=i;
        largest_eval=evals[largest_index];
        for (j=i+1; j<neigs; j++) {
            if (largest_eval<evals[j]) {
                largest_index=j;
                largest_eval=evals[largest_index];
            }
        }
        if (largest_index!=i) { // exchange eigenvectors:
            copy_vector(n, eigs[i], tmp_vec);
            copy_vector(n, eigs[largest_index], eigs[i]);
            copy_vector(n, tmp_vec, eigs[largest_index]);

            evals[largest_index]=evals[i];
            evals[i]=largest_eval;
        }
    }

    free (tmp_vec); free (last_vec);
}
  159. static float*
  160. compute_avgs(DistType** Dij, int n, float* all_avg)
  161. {
  162. float* row_avg = gv_calloc(n, sizeof(float));
  163. int i,j;
  164. double sum=0, sum_row;
  165. for (i=0; i<n; i++) {
  166. sum_row=0;
  167. for (j=0; j<n; j++) {
  168. sum+=(double)Dij[i][j]*(double)Dij[i][j];
  169. sum_row+=(double)Dij[i][j]*(double)Dij[i][j];
  170. }
  171. row_avg[i]=(float)sum_row/n;
  172. }
  173. *all_avg=(float)sum/(n*n);
  174. return row_avg;
  175. }
  176. static float**
  177. compute_Bij(DistType** Dij, int n)
  178. {
  179. int i,j;
  180. float *storage = gv_calloc(n * n, sizeof(float));
  181. float **Bij = gv_calloc(n, sizeof(float *));
  182. float* row_avg;
  183. float all_avg;
  184. for (i=0; i<n; i++)
  185. Bij[i] = storage+i*n;
  186. row_avg = compute_avgs(Dij, n, &all_avg);
  187. for (i=0; i<n; i++) {
  188. for (j=0; j<=i; j++) {
  189. Bij[i][j]=-(float)Dij[i][j]*Dij[i][j]+row_avg[i]+row_avg[j]-all_avg;
  190. Bij[j][i]=Bij[i][j];
  191. }
  192. }
  193. free (row_avg);
  194. return Bij;
  195. }
  196. static void
  197. CMDS_orthog(int n, int dim, double** eigs, double tol,
  198. double* orthog, DistType** Dij)
  199. {
  200. int i,j;
  201. float** Bij = compute_Bij(Dij, n);
  202. double *evals = gv_calloc(dim, sizeof(double));
  203. assert(orthog != NULL);
  204. double *orthog_aux = gv_calloc(n, sizeof(double));
  205. for (i=0; i<n; i++) {
  206. orthog_aux[i]=orthog[i];
  207. }
  208. standardize(orthog_aux,n);
  209. power_iteration_orthog(Bij, n, dim, eigs, evals, orthog_aux, tol);
  210. for (i=0; i<dim; i++) {
  211. for (j=0; j<n; j++) {
  212. eigs[i][j]*=sqrt(fabs(evals[i]));
  213. }
  214. }
  215. free (Bij[0]); free (Bij);
  216. free (evals); free (orthog_aux);
  217. }
  218. #define SCALE_FACTOR 256
/* IMDS_given_dim: compute one new coordinate axis (new_coords) for a graph
 * whose other axis (given_coords) is already fixed — digcola's "smart
 * initialization". The fixed axis is rescaled in place, used to seed
 * new_coords via orthogonal classical MDS, and then new_coords is refined by
 * iterating conjugate-gradient solves against a distance-derived Laplacian.
 * NOTE(review): given_coords is rescaled and then unscaled in place, so on
 * success it ends up (approximately) as passed in.
 * Returns 0 on success, 1 if the conjugate-gradient solver fails.
 */
int IMDS_given_dim(vtx_data* graph, int n, double* given_coords,
    double* new_coords, double conj_tol)
{
    int iterations2;
    int i,j, rv = 0;
    DistType** Dij;
    double* x = given_coords;   /* the fixed axis */
    double uniLength;
    double* y = new_coords;     /* the axis we compute */
    float **lap = gv_calloc(n, sizeof(float *));
    float degree;
    double pos_i;
    double *balance = gv_calloc(n, sizeof(double));
    double b;
    bool converged;

    /* all-pairs shortest-path distances */
    Dij = compute_apsp(graph, n);

    /* scaling up the distances to enable an 'sqrt' operation later
     * (in case distances are integers)
     */
    for (i=0; i<n; i++)
        for (j=0; j<n; j++)
            Dij[i][j]*=SCALE_FACTOR;

    assert(x!=NULL);
    {
        /* rescale x by uniLength = sum(|dx|/d) / sum(dx^2/d^2) so its spread
         * matches the graph distances; 0 if sum2 overflowed to infinity */
        double sum1, sum2;
        for (sum1=sum2=0,i=1; i<n; i++) {
            for (j=0; j<i; j++) {
                sum1+=1.0/(Dij[i][j])*fabs(x[i]-x[j]);
                sum2+=1.0/(Dij[i][j]*Dij[i][j])*fabs(x[i]-x[j])*fabs(x[i]-x[j]);
            }
        }
        uniLength = isinf(sum2) ? 0 : sum1 / sum2;
        for (i=0; i<n; i++)
            x[i]*=uniLength;
    }

    /* smart ini: seed y with a 1-D CMDS solution orthogonal to x */
    CMDS_orthog(n, 1, &y, conj_tol, x, Dij);

    /* Compute Laplacian: off-diagonals -w_ij with w_ij = 1/d_ij^2,
     * diagonal = sum of the row's weights */
    float *f_storage = gv_calloc(n * n, sizeof(float));
    for (i=0; i<n; i++) {
        lap[i]=f_storage+i*n;
        degree=0;
        for (j=0; j<n; j++) {
            if (j==i)
                continue;
            degree-=lap[i][j]=-1.0f/((float)Dij[i][j]*(float)Dij[i][j]); // w_{ij}
        }
        lap[i][i]=degree;
    }

    /* compute residual distances: overwrite Dij with the distance left after
     * subtracting the contribution already covered by the fixed x axis */
    /* if (x!=NULL) */
    {
        double diff;
        for (i=1; i<n; i++) {
            pos_i=x[i];
            for (j=0; j<i; j++) {
                diff=(double)Dij[i][j]*(double)Dij[i][j]-(pos_i-x[j])*(pos_i-x[j]);
                Dij[i][j]=Dij[j][i]=diff>0 ? (DistType)sqrt(diff) : 0;
            }
        }
    }

    /* Compute the balance vector: signed, weighted sum of residual target
     * distances, with sign set by the current ordering of y */
    for (i=0; i<n; i++) {
        pos_i=y[i];
        balance[i]=0;
        for (j=0; j<n; j++) {
            if (j==i)
                continue;
            if (pos_i>=y[j]) {
                balance[i]+=Dij[i][j]*(-lap[i][j]); // w_{ij}*delta_{ij}
            }
            else {
                balance[i]-=Dij[i][j]*(-lap[i][j]); // w_{ij}*delta_{ij}
            }
        }
    }

    /* refine y: solve lap*y = balance, recompute balance from the new
     * ordering, and repeat (at most 200 times) until balance stabilizes */
    for (converged=false,iterations2=0; iterations2<200 && !converged; iterations2++) {
        if (conjugate_gradient_f(lap, y, balance, n, conj_tol, n, true) < 0) {
            rv = 1;
            goto cleanup;
        }
        converged = true;
        for (i=0; i<n; i++) {
            pos_i=y[i];
            b=0;
            for (j=0; j<n; j++) {
                if (j==i)
                    continue;
                if (pos_i>=y[j]) {
                    b+=Dij[i][j]*(-lap[i][j]);
                }
                else {
                    b-=Dij[i][j]*(-lap[i][j]);
                }
            }
            /* relative change above 1e-5 means not yet converged */
            if ((b != balance[i]) && (fabs(1-b/balance[i])>1e-5)) {
                converged = false;
                balance[i]=b;
            }
        }
    }

    /* undo the uniLength scaling on both axes (skipped when uniLength is
     * degenerate/zero, which would divide by zero) */
    for (i = 0; !(fabs(uniLength) < DBL_EPSILON) && i < n; i++) {
        x[i] /= uniLength;
        y[i] /= uniLength;
    }

cleanup:
    free (Dij[0]); free (Dij);
    free (lap[0]); free (lap);
    free (balance);
    return rv;
}
  330. #endif /* DIGCOLA */