/*
 *
 * Copyright (c) 2014, Laurens van der Maaten (Delft University of Technology)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the Delft University of Technology.
 * 4. Neither the name of the Delft University of Technology nor the names of
 *    its contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY LAURENS VAN DER MAATEN ''AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL LAURENS VAN DER MAATEN BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 */
#include <cfloat>
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <cstring>
#include <ctime>
#include "sptree.h"
#include "tsne.h"
#include "vptree.h"

#pragma warning(disable:4996)

using namespace std;

static double sign(double x) { return (x == .0 ? .0 : (x < .0 ? -1.0 : 1.0)); }

static void zeroMean(double* X, int N, int D);
static void computeGaussianPerplexity(double* X, int N, int D, double* P, double perplexity);
static void computeGaussianPerplexity(double* X, int N, int D, unsigned int** _row_P, unsigned int** _col_P, double** _val_P, double perplexity, int K);
static double randn();
static void computeExactGradient(double* P, double* Y, int N, int D, double* dC);
static void computeGradient(unsigned int* inp_row_P, unsigned int* inp_col_P, double* inp_val_P, double* Y, int N, int D, double* dC, double theta);
static double evaluateError(double* P, double* Y, int N, int D);
static double evaluateError(unsigned int* row_P, unsigned int* col_P, double* val_P, double* Y, int N, int D, double theta);
static void computeSquaredEuclideanDistance(double* X, int N, int D, double* DD);
static void symmetrizeMatrix(unsigned int** row_P, unsigned int** col_P, double** val_P, int N);
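
// Outline of TSNE::run():
//   1. Zero-mean the input and scale it by its largest absolute value.
//   2. Compute input similarities P with a per-point Gaussian kernel whose bandwidth is
//      tuned to the requested perplexity: a dense N x N matrix when theta == 0 (exact
//      t-SNE), otherwise a sparse matrix over the 3 * perplexity nearest neighbors of
//      each point.
//   3. Symmetrize and normalize P, then multiply it by 12 ("early exaggeration") until
//      iteration stop_lying_iter.
//   4. Run gradient descent on the embedding Y with momentum (0.5, switched to 0.8 at
//      mom_switch_iter), per-parameter adaptive gains, and learning rate eta.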
// Perform t-SNE
void TSNE::run(double* X, int N, int D, double* Y, int no_dims, double perplexity, double theta, double eta, int rand_seed,
               bool skip_random_init, int max_iter, int stop_lying_iter, int mom_switch_iter) {

    // Set random seed
    if (skip_random_init != true) {
        if(rand_seed >= 0) {
            printf("Using random seed: %d\n", rand_seed);
            srand((unsigned int) rand_seed);
        } else {
            printf("Using current time as random seed...\n");
            srand(time(NULL));
        }
    }

    // Determine whether we are using an exact algorithm
    if(N - 1 < 3 * perplexity) { printf("Perplexity too large for the number of data points!\n"); exit(1); }
    printf("Using no_dims = %d, perplexity = %f, and theta = %f\n", no_dims, perplexity, theta);
    bool exact = (theta == .0) ? true : false;

    // Set learning parameters
    float total_time = .0;
    clock_t start, end;
    double momentum = .5, final_momentum = .8;

    // Allocate some memory
    double* dY = (double*) malloc(N * no_dims * sizeof(double));
    double* uY = (double*) malloc(N * no_dims * sizeof(double));
    double* gains = (double*) malloc(N * no_dims * sizeof(double));
    if(dY == NULL || uY == NULL || gains == NULL) { printf("Memory allocation failed!\n"); exit(1); }
    for(int i = 0; i < N * no_dims; i++) uY[i] = .0;
    for(int i = 0; i < N * no_dims; i++) gains[i] = 1.0;

    // Normalize input data (to prevent numerical problems)
    printf("Computing input similarities...\n");
    start = clock();
    zeroMean(X, N, D);
    double max_X = .0;
    for(int i = 0; i < N * D; i++) {
        if(fabs(X[i]) > max_X) max_X = fabs(X[i]);
    }
    for(int i = 0; i < N * D; i++) X[i] /= max_X;

    // Compute input similarities for exact t-SNE
    double* P = nullptr; unsigned int* row_P = nullptr; unsigned int* col_P = nullptr; double* val_P = nullptr;
    if(exact) {

        // Compute similarities
        printf("Computing exact pairwise similarities...\n");
        P = (double*) malloc(N * N * sizeof(double));
        if(P == NULL) { printf("Memory allocation failed!\n"); exit(1); }
        computeGaussianPerplexity(X, N, D, P, perplexity);

        // Symmetrize input similarities
        printf("Symmetrizing...\n");
        int nN = 0;
        for(int n = 0; n < N; n++) {
            int mN = (n + 1) * N;
            for(int m = n + 1; m < N; m++) {
                P[nN + m] += P[mN + n];
                P[mN + n] = P[nN + m];
                mN += N;
            }
            nN += N;
        }
        double sum_P = .0;
        for(int i = 0; i < N * N; i++) sum_P += P[i];
        for(int i = 0; i < N * N; i++) P[i] /= sum_P;
    }

    // Compute input similarities for approximate t-SNE
    else {

        // Compute asymmetric pairwise input similarities
        computeGaussianPerplexity(X, N, D, &row_P, &col_P, &val_P, perplexity, (int) (3 * perplexity));

        // Symmetrize input similarities
        symmetrizeMatrix(&row_P, &col_P, &val_P, N);
        double sum_P = .0;
        for(int i = 0; i < row_P[N]; i++) sum_P += val_P[i];
        for(int i = 0; i < row_P[N]; i++) val_P[i] /= sum_P;
    }
    end = clock();

    // Lie about the P-values
    if(exact) { for(int i = 0; i < N * N; i++) P[i] *= 12.0; }
    else { for(int i = 0; i < row_P[N]; i++) val_P[i] *= 12.0; }

    // Initialize solution (randomly)
    if (skip_random_init != true) {
        for(int i = 0; i < N * no_dims; i++) Y[i] = randn() * .0001;
    }

    // Perform main training loop
    if(exact) printf("Input similarities computed in %4.2f seconds!\nLearning embedding...\n", (float) (end - start) / CLOCKS_PER_SEC);
    else printf("Input similarities computed in %4.2f seconds (sparsity = %f)!\nLearning embedding...\n", (float) (end - start) / CLOCKS_PER_SEC, (double) row_P[N] / ((double) N * (double) N));
    start = clock();
    double last_C = -1;
    for(int iter = 0; iter < max_iter; iter++) {

        // Compute (approximate) gradient
        if(exact) computeExactGradient(P, Y, N, no_dims, dY);
        else computeGradient(row_P, col_P, val_P, Y, N, no_dims, dY, theta);

        // Update gains
        for(int i = 0; i < N * no_dims; i++) gains[i] = (sign(dY[i]) != sign(uY[i])) ? (gains[i] + .2) : (gains[i] * .8);
        for(int i = 0; i < N * no_dims; i++) if(gains[i] < .01) gains[i] = .01;

        // Perform gradient update (with momentum and gains)
        for(int i = 0; i < N * no_dims; i++) uY[i] = momentum * uY[i] - eta * gains[i] * dY[i];
        for(int i = 0; i < N * no_dims; i++) Y[i] = Y[i] + uY[i];

        // Make solution zero-mean
        zeroMean(Y, N, no_dims);

        // Stop lying about the P-values after a while, and switch momentum
        if(iter == stop_lying_iter) {
            if(exact) { for(int i = 0; i < N * N; i++) P[i] /= 12.0; }
            else { for(int i = 0; i < row_P[N]; i++) val_P[i] /= 12.0; }
        }
        if(iter == mom_switch_iter) momentum = final_momentum;

        // Print out progress
        if (iter > 0 && (iter % 50 == 0 || iter == max_iter - 1)) {
            end = clock();
            double C = .0;
            if(exact) C = evaluateError(P, Y, N, no_dims);
            else C = evaluateError(row_P, col_P, val_P, Y, N, no_dims, theta); // doing approximate computation here!
            if(iter == 0)
                printf("Iteration %d: error is %f\n", iter + 1, C);
            else {
                total_time += (float) (end - start) / CLOCKS_PER_SEC;
                printf("Iteration %d: error is %f (50 iterations in %4.2f seconds)\n", iter, C, (float) (end - start) / CLOCKS_PER_SEC);
            }
            start = clock();
            /*if (std::fabs(last_C - C) < 0.001) {
                break;
            }
            last_C = C;*/
        }
    }
    end = clock(); total_time += (float) (end - start) / CLOCKS_PER_SEC;

    // Clean up memory
    free(dY);
    free(uY);
    free(gains);
    if(exact) free(P);
    else {
        free(row_P); row_P = NULL;
        free(col_P); col_P = NULL;
        free(val_P); val_P = NULL;
    }
    printf("Fitting performed in %4.2f seconds.\n", total_time);
}
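
// The Barnes-Hut gradient splits the t-SNE gradient into an attractive part, computed
// exactly over the sparse input similarities (edge forces), and a repulsive part that is
// approximated by walking a space-partitioning tree (SPTree) over the current embedding;
// theta controls the accuracy/speed trade-off of that approximation. The repulsive forces
// are accumulated unnormalized together with the normalization constant sum_Q, and the
// final gradient is dC = pos_f - neg_f / sum_Q.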
// Compute gradient of the t-SNE cost function (using Barnes-Hut algorithm)
static void computeGradient(unsigned int* inp_row_P, unsigned int* inp_col_P, double* inp_val_P, double* Y, int N, int D, double* dC, double theta)
{
    // Construct space-partitioning tree on current map
    SPTree* tree = new SPTree(D, Y, N);

    // Compute all terms required for t-SNE gradient
    double sum_Q = .0;
    double* pos_f = (double*) calloc(N * D, sizeof(double));
    double* neg_f = (double*) calloc(N * D, sizeof(double));
    if(pos_f == NULL || neg_f == NULL) { printf("Memory allocation failed!\n"); exit(1); }
    tree->computeEdgeForces(inp_row_P, inp_col_P, inp_val_P, N, pos_f);
    for(int n = 0; n < N; n++) tree->computeNonEdgeForces(n, theta, neg_f + n * D, &sum_Q);

    // Compute final t-SNE gradient
    for(int i = 0; i < N * D; i++) {
        dC[i] = pos_f[i] - (neg_f[i] / sum_Q);
    }
    free(pos_f);
    free(neg_f);
    delete tree;
}
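
// The exact gradient loops over all N * N pairs: for each pair (n, m) it uses the
// Student-t kernel Q[nm] = 1 / (1 + ||y_n - y_m||^2) and accumulates
// (P[nm] - Q[nm] / sum_Q) * Q[nm] * (y_n - y_m) into dC for point n. The constant factor
// 4 that appears in the t-SNE paper is omitted here and is effectively absorbed by the
// learning rate.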
// Compute gradient of the t-SNE cost function (exact)
static void computeExactGradient(double* P, double* Y, int N, int D, double* dC) {

    // Make sure the current gradient contains zeros
    for(int i = 0; i < N * D; i++) dC[i] = 0.0;

    // Compute the squared Euclidean distance matrix
    double* DD = (double*) malloc(N * N * sizeof(double));
    if(DD == NULL) { printf("Memory allocation failed!\n"); exit(1); }
    computeSquaredEuclideanDistance(Y, N, D, DD);

    // Compute Q-matrix and normalization sum
    double* Q = (double*) malloc(N * N * sizeof(double));
    if(Q == NULL) { printf("Memory allocation failed!\n"); exit(1); }
    double sum_Q = .0;
    int nN = 0;
    for(int n = 0; n < N; n++) {
        for(int m = 0; m < N; m++) {
            if(n != m) {
                Q[nN + m] = 1 / (1 + DD[nN + m]);
                sum_Q += Q[nN + m];
            }
        }
        nN += N;
    }

    // Perform the computation of the gradient
    nN = 0;
    int nD = 0;
    for(int n = 0; n < N; n++) {
        int mD = 0;
        for(int m = 0; m < N; m++) {
            if(n != m) {
                double mult = (P[nN + m] - (Q[nN + m] / sum_Q)) * Q[nN + m];
                for(int d = 0; d < D; d++) {
                    dC[nD + d] += (Y[nD + d] - Y[mD + d]) * mult;
                }
            }
            mD += D;
        }
        nN += N;
        nD += D;
    }

    // Free memory
    free(DD); DD = NULL;
    free(Q); Q = NULL;
}
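
// The exact cost is the Kullback-Leibler divergence KL(P || Q) between the input
// similarities P and the normalized Student-t similarities Q of the current embedding;
// FLT_MIN / DBL_MIN are added in a few places purely to avoid taking the log of zero or
// dividing by zero.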
// Evaluate t-SNE cost function (exactly)
static double evaluateError(double* P, double* Y, int N, int D) {

    // Compute the squared Euclidean distance matrix
    double* DD = (double*) malloc(N * N * sizeof(double));
    double* Q = (double*) malloc(N * N * sizeof(double));
    if(DD == NULL || Q == NULL) { printf("Memory allocation failed!\n"); exit(1); }
    computeSquaredEuclideanDistance(Y, N, D, DD);

    // Compute Q-matrix and normalization sum
    int nN = 0;
    double sum_Q = DBL_MIN;
    for(int n = 0; n < N; n++) {
        for(int m = 0; m < N; m++) {
            if(n != m) {
                Q[nN + m] = 1 / (1 + DD[nN + m]);
                sum_Q += Q[nN + m];
            }
            else Q[nN + m] = DBL_MIN;
        }
        nN += N;
    }
    for(int i = 0; i < N * N; i++) Q[i] /= sum_Q;

    // Sum t-SNE error
    double C = .0;
    for(int n = 0; n < N * N; n++) {
        C += P[n] * log((P[n] + FLT_MIN) / (Q[n] + FLT_MIN));
    }

    // Clean up memory
    free(DD);
    free(Q);
    return C;
}
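
// The approximate cost estimates the normalization constant sum_Q with the same
// Barnes-Hut tree used for the gradient, and then evaluates the KL divergence only over
// the stored sparse edges of P (entries of P that are not stored are zero and contribute
// nothing to the sum).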
// Evaluate t-SNE cost function (approximately)
static double evaluateError(unsigned int* row_P, unsigned int* col_P, double* val_P, double* Y, int N, int D, double theta)
{
    // Get estimate of normalization term
    SPTree* tree = new SPTree(D, Y, N);
    double* buff = (double*) calloc(D, sizeof(double));
    double sum_Q = .0;
    for(int n = 0; n < N; n++) tree->computeNonEdgeForces(n, theta, buff, &sum_Q);

    // Loop over all edges to compute t-SNE error
    int ind1, ind2;
    double C = .0, Q;
    for(int n = 0; n < N; n++) {
        ind1 = n * D;
        for(int i = row_P[n]; i < row_P[n + 1]; i++) {
            Q = .0;
            ind2 = col_P[i] * D;
            for(int d = 0; d < D; d++) buff[d] = Y[ind1 + d];
            for(int d = 0; d < D; d++) buff[d] -= Y[ind2 + d];
            for(int d = 0; d < D; d++) Q += buff[d] * buff[d];
            Q = (1.0 / (1.0 + Q)) / sum_Q;
            C += val_P[i] * log((val_P[i] + FLT_MIN) / (Q + FLT_MIN));
        }
    }

    // Clean up memory
    free(buff);
    delete tree;
    return C;
}
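
// For every point n this performs a bisection search on the precision
// beta = 1 / (2 * sigma_n^2) of a Gaussian kernel centered on x_n, until the entropy H of
// the resulting conditional distribution matches log(perplexity) within tol (at most 200
// iterations). The row is then normalized so that it sums to one.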
// Compute input similarities with a fixed perplexity
static void computeGaussianPerplexity(double* X, int N, int D, double* P, double perplexity) {

    // Compute the squared Euclidean distance matrix
    double* DD = (double*) malloc(N * N * sizeof(double));
    if(DD == NULL) { printf("Memory allocation failed!\n"); exit(1); }
    computeSquaredEuclideanDistance(X, N, D, DD);

    // Compute the Gaussian kernel row by row
    int nN = 0;
    for(int n = 0; n < N; n++) {

        // Initialize some variables
        bool found = false;
        double beta = 1.0;
        double min_beta = -DBL_MAX;
        double max_beta = DBL_MAX;
        double tol = 1e-5;
        double sum_P;
        // Iterate until we find a good perplexity
        int iter = 0;
        while(!found && iter < 200) {

            // Compute Gaussian kernel row
            for(int m = 0; m < N; m++) P[nN + m] = exp(-beta * DD[nN + m]);
            P[nN + n] = DBL_MIN;

            // Compute entropy of current row
            sum_P = DBL_MIN;
            for(int m = 0; m < N; m++) sum_P += P[nN + m];
            double H = 0.0;
            for(int m = 0; m < N; m++) H += beta * (DD[nN + m] * P[nN + m]);
            H = (H / sum_P) + log(sum_P);

            // Evaluate whether the entropy is within the tolerance level
            double Hdiff = H - log(perplexity);
            if(Hdiff < tol && -Hdiff < tol) {
                found = true;
            }
            else {
                if(Hdiff > 0) {
                    min_beta = beta;
                    if(max_beta == DBL_MAX || max_beta == -DBL_MAX)
                        beta *= 2.0;
                    else
                        beta = (beta + max_beta) / 2.0;
                }
                else {
                    max_beta = beta;
                    if(min_beta == -DBL_MAX || min_beta == DBL_MAX)
                        beta /= 2.0;
                    else
                        beta = (beta + min_beta) / 2.0;
                }
            }

            // Update iteration counter
            iter++;
        }

        // Row normalize P
        for(int m = 0; m < N; m++) P[nN + m] /= sum_P;
        nN += N;
    }

    // Clean up memory
    free(DD); DD = NULL;
}
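
// Sparse variant of the routine above: a vantage-point tree is used to find the K nearest
// neighbors of every point (K + 1 including the point itself, which is skipped via the
// m + 1 offsets), the same bisection on beta is run over those K distances only, and the
// result is stored in CSR-like form: row_P holds row offsets, col_P column indices, and
// val_P the similarity values.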
// Compute input similarities with a fixed perplexity using ball trees (this function allocates memory another function should free)
static void computeGaussianPerplexity(double* X, int N, int D, unsigned int** _row_P, unsigned int** _col_P, double** _val_P, double perplexity, int K) {

    if(perplexity > K) printf("Perplexity should be lower than K!\n");

    // Allocate the memory we need
    *_row_P = (unsigned int*) malloc((N + 1) * sizeof(unsigned int));
    *_col_P = (unsigned int*) calloc(N * K, sizeof(unsigned int));
    *_val_P = (double*) calloc(N * K, sizeof(double));
    if(*_row_P == NULL || *_col_P == NULL || *_val_P == NULL) { printf("Memory allocation failed!\n"); exit(1); }
    unsigned int* row_P = *_row_P;
    unsigned int* col_P = *_col_P;
    double* val_P = *_val_P;
    double* cur_P = (double*) malloc((N - 1) * sizeof(double));
    if(cur_P == NULL) { printf("Memory allocation failed!\n"); exit(1); }
    row_P[0] = 0;
    for(int n = 0; n < N; n++) row_P[n + 1] = row_P[n] + (unsigned int) K;

    // Build ball tree on data set
    VpTree<DataPoint, euclidean_distance>* tree = new VpTree<DataPoint, euclidean_distance>();
    vector<DataPoint> obj_X(N, DataPoint(D, -1, X));
    for(int n = 0; n < N; n++) obj_X[n] = DataPoint(D, n, X + n * D);
    tree->create(obj_X);

    // Loop over all points to find nearest neighbors
    printf("Building tree...\n");
    vector<DataPoint> indices;
    vector<double> distances;
    for(int n = 0; n < N; n++) {

        if(n % 10000 == 0) printf(" - point %d of %d\n", n, N);

        // Find nearest neighbors
        indices.clear();
        distances.clear();
        tree->search(obj_X[n], K + 1, &indices, &distances);

        // Initialize some variables for binary search
        bool found = false;
        double beta = 1.0;
        double min_beta = -DBL_MAX;
        double max_beta = DBL_MAX;
        double tol = 1e-5;
        // Iterate until we find a good perplexity
        int iter = 0; double sum_P;
        while(!found && iter < 200) {

            // Compute Gaussian kernel row
            for(int m = 0; m < K; m++) cur_P[m] = exp(-beta * distances[m + 1] * distances[m + 1]);

            // Compute entropy of current row
            sum_P = DBL_MIN;
            for(int m = 0; m < K; m++) sum_P += cur_P[m];
            double H = .0;
            for(int m = 0; m < K; m++) H += beta * (distances[m + 1] * distances[m + 1] * cur_P[m]);
            H = (H / sum_P) + log(sum_P);

            // Evaluate whether the entropy is within the tolerance level
            double Hdiff = H - log(perplexity);
            if(Hdiff < tol && -Hdiff < tol) {
                found = true;
            }
            else {
                if(Hdiff > 0) {
                    min_beta = beta;
                    if(max_beta == DBL_MAX || max_beta == -DBL_MAX)
                        beta *= 2.0;
                    else
                        beta = (beta + max_beta) / 2.0;
                }
                else {
                    max_beta = beta;
                    if(min_beta == -DBL_MAX || min_beta == DBL_MAX)
                        beta /= 2.0;
                    else
                        beta = (beta + min_beta) / 2.0;
                }
            }

            // Update iteration counter
            iter++;
        }

        // Row-normalize current row of P and store in matrix
        for(unsigned int m = 0; m < K; m++) cur_P[m] /= sum_P;
        for(unsigned int m = 0; m < K; m++) {
            col_P[row_P[n] + m] = (unsigned int) indices[m + 1].index();
            val_P[row_P[n] + m] = cur_P[m];
        }
    }

    // Clean up memory
    obj_X.clear();
    free(cur_P);
    delete tree;
}
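
// Symmetrizes the sparse similarity matrix in CSR form, i.e. computes
// P := (P + P^T) / 2 on the union of the sparsity patterns of P and P^T: first count how
// many entries each symmetrized row will have, then fill the col/val arrays, adding
// val(n, m) and val(m, n) where both are present, and finally divide every stored value
// by two.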
// Symmetrizes a sparse matrix
static void symmetrizeMatrix(unsigned int** _row_P, unsigned int** _col_P, double** _val_P, int N) {

    // Get sparse matrix
    unsigned int* row_P = *_row_P;
    unsigned int* col_P = *_col_P;
    double* val_P = *_val_P;

    // Count number of elements and row counts of symmetric matrix
    int* row_counts = (int*) calloc(N, sizeof(int));
    if(row_counts == NULL) { printf("Memory allocation failed!\n"); exit(1); }
    for(int n = 0; n < N; n++) {
        for(int i = row_P[n]; i < row_P[n + 1]; i++) {

            // Check whether element (col_P[i], n) is present
            bool present = false;
            for(int m = row_P[col_P[i]]; m < row_P[col_P[i] + 1]; m++) {
                if(col_P[m] == n) present = true;
            }
            if(present) row_counts[n]++;
            else {
                row_counts[n]++;
                row_counts[col_P[i]]++;
            }
        }
    }
    int no_elem = 0;
    for(int n = 0; n < N; n++) no_elem += row_counts[n];

    // Allocate memory for symmetrized matrix
    unsigned int* sym_row_P = (unsigned int*) malloc((N + 1) * sizeof(unsigned int));
    unsigned int* sym_col_P = (unsigned int*) malloc(no_elem * sizeof(unsigned int));
    double* sym_val_P = (double*) malloc(no_elem * sizeof(double));
    if(sym_row_P == NULL || sym_col_P == NULL || sym_val_P == NULL) { printf("Memory allocation failed!\n"); exit(1); }

    // Construct new row indices for symmetric matrix
    sym_row_P[0] = 0;
    for(int n = 0; n < N; n++) sym_row_P[n + 1] = sym_row_P[n] + (unsigned int) row_counts[n];

    // Fill the result matrix
    int* offset = (int*) calloc(N, sizeof(int));
    if(offset == NULL) { printf("Memory allocation failed!\n"); exit(1); }
    for(int n = 0; n < N; n++) {
        for(unsigned int i = row_P[n]; i < row_P[n + 1]; i++) { // considering element(n, col_P[i])

            // Check whether element (col_P[i], n) is present
            bool present = false;
            for(unsigned int m = row_P[col_P[i]]; m < row_P[col_P[i] + 1]; m++) {
                if(col_P[m] == n) {
                    present = true;
                    if(n <= col_P[i]) { // make sure we do not add elements twice
                        sym_col_P[sym_row_P[n] + offset[n]] = col_P[i];
                        sym_col_P[sym_row_P[col_P[i]] + offset[col_P[i]]] = n;
                        sym_val_P[sym_row_P[n] + offset[n]] = val_P[i] + val_P[m];
                        sym_val_P[sym_row_P[col_P[i]] + offset[col_P[i]]] = val_P[i] + val_P[m];
                    }
                }
            }

            // If (col_P[i], n) is not present, there is no addition involved
            if(!present) {
                sym_col_P[sym_row_P[n] + offset[n]] = col_P[i];
                sym_col_P[sym_row_P[col_P[i]] + offset[col_P[i]]] = n;
                sym_val_P[sym_row_P[n] + offset[n]] = val_P[i];
                sym_val_P[sym_row_P[col_P[i]] + offset[col_P[i]]] = val_P[i];
            }

            // Update offsets
            if(!present || (present && n <= col_P[i])) {
                offset[n]++;
                if(col_P[i] != n) offset[col_P[i]]++;
            }
        }
    }

    // Divide the result by two
    for(int i = 0; i < no_elem; i++) sym_val_P[i] /= 2.0;

    // Return symmetrized matrices
    free(*_row_P); *_row_P = sym_row_P;
    free(*_col_P); *_col_P = sym_col_P;
    free(*_val_P); *_val_P = sym_val_P;
    // Free up some memory
    free(offset); offset = NULL;
    free(row_counts); row_counts = NULL;
}
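
// Fills the full N x N matrix DD with squared Euclidean distances. Only the upper
// triangle is actually computed (the diagonal is set to zero and each entry is mirrored
// to its symmetric counterpart via curr_elem_sym), using raw pointer walks to avoid
// recomputing index offsets in the inner loop.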
// Compute squared Euclidean distance matrix
static void computeSquaredEuclideanDistance(double* X, int N, int D, double* DD) {
    const double* XnD = X;
    for(int n = 0; n < N; ++n, XnD += D) {
        const double* XmD = XnD + D;
        double* curr_elem = &DD[n*N + n];
        *curr_elem = 0.0;
        double* curr_elem_sym = curr_elem + N;
        for(int m = n + 1; m < N; ++m, XmD += D, curr_elem_sym += N) {
            *(++curr_elem) = 0.0;
            for(int d = 0; d < D; ++d) {
                *curr_elem += (XnD[d] - XmD[d]) * (XnD[d] - XmD[d]);
            }
            *curr_elem_sym = *curr_elem;
        }
    }
}

// Makes data zero-mean
static void zeroMean(double* X, int N, int D) {

    // Compute data mean
    double* mean = (double*) calloc(D, sizeof(double));
    if(mean == NULL) { printf("Memory allocation failed!\n"); exit(1); }
    int nD = 0;
    for(int n = 0; n < N; n++) {
        for(int d = 0; d < D; d++) {
            mean[d] += X[nD + d];
        }
        nD += D;
    }
    for(int d = 0; d < D; d++) {
        mean[d] /= (double) N;
    }

    // Subtract data mean
    nD = 0;
    for(int n = 0; n < N; n++) {
        for(int d = 0; d < D; d++) {
            X[nD + d] -= mean[d];
        }
        nD += D;
    }
    free(mean); mean = NULL;
}
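
// Marsaglia's polar method: draw (x, y) uniformly from the unit disc (rejecting points
// outside it) and rescale; this yields two independent standard-normal deviates, of which
// only one is returned.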
// Generates a Gaussian random number
static double randn() {
    double x, y, radius;
    do {
        x = 2 * (rand() / ((double) RAND_MAX + 1)) - 1;
        y = 2 * (rand() / ((double) RAND_MAX + 1)) - 1;
        radius = (x * x) + (y * y);
    } while((radius >= 1.0) || (radius == 0.0));
    radius = sqrt(-2 * log(radius) / radius);
    x *= radius;
    y *= radius;
    return x;
}
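
// The input file "data.dat" is a raw binary dump read in the machine's native layout, in
// this order: int N (number of points), int D (input dimensionality), double theta,
// double perplexity, int no_dims (output dimensionality), int max_iter, then N * D
// doubles of row-major data, and optionally a trailing int random seed.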
// Function that loads data from a t-SNE file
// Note: this function does a malloc that should be freed elsewhere
bool TSNE::load_data(double** data, int* n, int* d, int* no_dims, double* theta, double* perplexity, int* rand_seed, int* max_iter) {

    // Open file, read first 2 integers, allocate memory, and read the data
    FILE *h;
    if((h = fopen("data.dat", "r+b")) == NULL) {
        printf("Error: could not open data file.\n");
        return false;
    }
    fread(n, sizeof(int), 1, h);                // number of datapoints
    fread(d, sizeof(int), 1, h);                // original dimensionality
    fread(theta, sizeof(double), 1, h);         // gradient accuracy
    fread(perplexity, sizeof(double), 1, h);    // perplexity
    fread(no_dims, sizeof(int), 1, h);          // output dimensionality
    fread(max_iter, sizeof(int), 1, h);         // maximum number of iterations
    *data = (double*) malloc(*d * *n * sizeof(double));
    if(*data == NULL) { printf("Memory allocation failed!\n"); exit(1); }
    fread(*data, sizeof(double), *n * *d, h);   // the data
    if(!feof(h)) fread(rand_seed, sizeof(int), 1, h); // random seed
    fclose(h);
    printf("Read the %i x %i data matrix successfully!\n", *n, *d);
    return true;
}
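
// The output file "result.dat" mirrors the input format: int n, int d, n * d doubles
// containing the embedding, n ints of landmark indices, and n doubles of per-point costs,
// all written in the machine's native binary layout.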
// Function that saves map to a t-SNE file
void TSNE::save_data(double* data, int* landmarks, double* costs, int n, int d) {

    // Open file, write first 2 integers and then the data
    FILE *h;
    if((h = fopen("result.dat", "w+b")) == NULL) {
        printf("Error: could not open data file.\n");
        return;
    }
    fwrite(&n, sizeof(int), 1, h);
    fwrite(&d, sizeof(int), 1, h);
    fwrite(data, sizeof(double), n * d, h);
    fwrite(landmarks, sizeof(int), n, h);
    fwrite(costs, sizeof(double), n, h);
    fclose(h);
    printf("Wrote the %i x %i data matrix successfully!\n", n, d);
}