/* ccmath_grass_wrapper.c */
  1. /*****************************************************************************
  2. *
  3. * MODULE: Grass numerical math interface
  4. * AUTHOR(S): Soeren Gebbert, Berlin (GER) Dec 2006
  5. * soerengebbert <at> googlemail <dot> com
  6. *
  7. * PURPOSE: ccmath library function wrapper
  8. * part of the gmath library
  9. *
  10. * COPYRIGHT: (C) 2010 by the GRASS Development Team
  11. *
  12. * This program is free software under the GNU General Public
  13. * License (>=v2). Read the file COPYING that comes with GRASS
  14. * for details.
  15. *
  16. *****************************************************************************/
  17. #if defined(HAVE_CCMATH)
  18. #include <ccmath.h>
  19. #else
  20. #include <grass/ccmath_grass.h>
  21. #endif
  22. /**
  23. * Documentation and ccmath library version 2.2.1 by Daniel A. Atkinson
  24. *
  25. Chapter 1
  26. LINEAR ALGEBRA
  27. Summary
  28. The matrix algebra library contains functions that
  29. perform the standard computations of linear algebra.
  30. General areas covered are:
  31. o Solution of Linear Systems
  32. o Matrix Inversion
  33. o Eigensystem Analysis
  34. o Matrix Utility Operations
  35. o Singular Value Decomposition
  36. The operations covered here are fundamental to many
  37. areas of mathematics and statistics. Thus, functions
  38. in this library segment are called by other library
  39. functions. Both real and complex valued matrices
  40. are covered by functions in the first four of these
  41. categories.
  42. Notes on Contents
  43. Functions in this library segment provide the basic operations of
  44. numerical linear algebra and some useful utility functions for operations on
  45. vectors and matrices. The following list describes the functions available for
  46. operations with real-valued matrices.
  47. o Solving and Inverting Linear Systems:
  48. solv --------- solve a general system of real linear equations.
  49. solvps ------- solve a real symmetric linear system.
  50. solvru ------- solve a real right upper triangular linear system.
  51. solvtd ------- solve a tridiagonal real linear system.
  52. minv --------- invert a general real square matrix.
  53. psinv -------- invert a real symmetric matrix.
  54. ruinv -------- invert a right upper triangular matrix.
  55. The solution of a general linear system and efficient algorithms for
  56. solving special systems with symmetric and tridiagonal matrices are provided
  57. by these functions. The general solution function employs a LU factorization
  58. with partial pivoting and it is very robust. It will work efficiently on any
  59. problem that is not ill-conditioned. The symmetric matrix solution is based
  60. on a modified Cholesky factorization. It is best used on positive definite
  61. matrices that do not require pivoting for numeric stability. Tridiagonal
  62. solvers require order-N operations (N = dimension). Thus, they are highly
  63. recommended for this important class of sparse systems. Two matrix inversion
  64. routines are provided. The general inversion function is again LU based. It
  65. is suitable for use on any stable (ie. well-conditioned) problem. The
  66. Cholesky based symmetric matrix inversion is efficient and safe for use on
  67. matrices known to be positive definite, such as the variance matrices
  68. encountered in statistical computations. Both the solver and the inverse
  69. functions are designed to enhance data locality. They are very effective
  70. on modern microprocessors.
  71. o Eigensystem Analysis:
  72. eigen ------ extract all eigen values and vectors of a real
  73. symmetric matrix.
  74. eigval ----- extract the eigen values of a real symmetric matrix.
  75. evmax ------ compute the eigen value of maximum absolute magnitude
  76. and its corresponding vector for a symmetric matrix.
  77. Eigensystem functions operate on real symmetric matrices. Two forms of
  78. the general eigen routine are provided because the computation of eigen values
  79. only is much faster when vectors are not required. The basic algorithms use
  80. a Householder reduction to tridiagonal form followed by QR iterations with
  81. shifts to enhance convergence. This has become the accepted standard for
  82. symmetric eigensystem computation. The evmax function uses an efficient
  83. iterative power method algorithm to extract the eigen value of maximum
  84. absolute size and the corresponding eigenvector.
  85. o Singular Value Decomposition:
  86. svdval ----- compute the singular values of a m by n real matrix.
  87. sv2val ----- compute the singular values of a real matrix
  88. efficiently for m >> n.
  89. svduv ------ compute the singular values and the transformation
  90. matrices u and v for a real m by n matrix.
  91. sv2uv ------ compute the singular values and transformation
  92. matrices efficiently for m >> n.
  93. svdu1v ----- compute the singular values and transformation
  94. matrices u1 and v, where u1 overloads the input
  95. with the first n column vectors of u.
  96. sv2u1v ----- compute the singular values and the transformation
  97. matrices u1 and v efficiently for m >> n.
  98. Singular value decomposition is extremely useful when dealing with linear
  99. systems that may be singular. Singular values with values near zero are flags
  100. of a potential rank deficiency in the system matrix. They can be used to
  101. identify the presence of an ill-conditioned problem and, in some cases, to
  102. deal with the potential instability. They are applied to the linear least
  103. squares problem in this library. Singular values also define some important
  104. matrix norm parameters such as the 2-norm and the condition value. A complete
  105. decomposition provides both singular values and an orthogonal decomposition of
  106. vector spaces related to the matrix identifying the range and null-space.
  107. Fortunately, a highly stable algorithm based on Householder reduction to
  108. bidiagonal form and QR rotations can be used to implement the decomposition.
  109. The library provides two forms with one more efficient when the dimensions
  110. satisfy m > (3/2)n.
  111. General Technical Comments
  112. Efficient computation with matrices on modern processors must be
  113. adapted to the storage scheme employed for matrix elements. The functions
  114. of this library segment do not employ the multidimensional array intrinsic
  115. of the C language. Access to elements employs the simple row-major scheme
  116. described here.
  117. Matrices are modeled by the library functions as arrays with elements
  118. stored in row order. Thus, the element in the jth row and kth column of
  119. the n by n matrix M, stored in the array mat[], is addressed by
  120. M[j,k] = mat[n*j+k] , with 0 <= j,k <= n-1 .
  121. (Remember that C employs zero as the starting index.) The storage order has
  122. important implications for data locality.
  123. The algorithms employed here all have excellent numerical stability, and
  124. the default double precision arithmetic of C enhances this. Thus, any
  125. problems encountered in using the matrix algebra functions will almost
  126. certainly be due to an ill-conditioned matrix. (The Hilbert matrices,
  127. H[i,j] = 1/(1+i+j) for i,j < n
  128. form a good example of such ill-conditioned systems.) We remind the reader
  129. that the appropriate response to such ill-conditioning is to seek an
  130. alternative approach to the problem. The option of increasing precision has
  131. already been exploited. Modification of the linear algebra algorithm code is
  132. not normally effective in an ill-conditioned problem.
  133. ------------------------------------------------------------------------------
  134. FUNCTION SYNOPSES
  135. ------------------------------------------------------------------------------
  136. Linear System Solutions:
  137. -----------------------------------------------------------------------------
  138. */
  139. /**
  140. \brief Solve a general linear system A*x = b.
  141. \param a = array containing system matrix A in row order (altered to L-U factored form by computation)
  142. \param b = array containing system vector b at entry and solution vector x at exit
  143. \param n = dimension of system
  144. \return 0 -> normal exit; -1 -> singular input
  145. */
  146. int G_math_solv(double **a,double *b,int n)
  147. {
  148. return solv(a[0],b, n);
  149. }
  150. /**
  151. \brief Solve a symmetric positive definite linear system S*x = b.
  152. \param a = array containing system matrix S (altered to Cholesky upper right factor by computation)
  153. \param b = array containing system vector b as input and solution vector x as output
  154. \param n = dimension of system
  155. \return: 0 -> normal exit; -1 -> input matrix not positive definite
  156. */
  157. int G_math_solvps(double **a,double *b,int n)
  158. {
  159. return solvps(a[0], b,n);
  160. }
  161. /**
  162. \brief Solve a tridiagonal linear system M*x = y.
  163. \param a = array containing m+1 diagonal elements of M
  164. \param b = array of m elements below the main diagonal of M
  165. \param c = array of m elements above the main diagonal
  166. \param x = array containing the system vector y initially, and the solution vector at exit (m+1 elements)
  167. \param m = dimension parameter ( M is (m+1)x(m+1) )
  168. */
  169. void G_math_solvtd(double *a,double *b,double *c,double *x,int m)
  170. {
  171. solvtd(a, b, c, x, m);
  172. return;
  173. }
  174. /*
  175. \brief Solve an upper right triangular linear system T*x = b.
  176. \param a = pointer to array of upper right triangular matrix T
  177. \param b = pointer to array of system vector The computation overloads this with the solution vector x.
  178. \param n = dimension (dim(a)=n*n,dim(b)=n)
  179. \return value: f = status flag, with 0 -> normal exit, -1 -> system singular
  180. */
  181. int G_math_solvru(double **a,double *b,int n)
  182. {
  183. return solvru(a[0], b, n);
  184. }
  185. /**
  186. \brief Invert (in place) a general real matrix A -> Inv(A).
  187. \param a = array containing the input matrix A. This is converted to the inverse matrix.
  188. \param n = dimension of the system (i.e. A is n x n )
  189. \return: 0 -> normal exit, 1 -> singular input matrix
  190. */
  191. int G_math_minv(double **a,int n)
  192. {
  193. return minv(a[0], n);
  194. }
  195. /**
  196. \brief Invert (in place) a symmetric real matrix, V -> Inv(V).
  197. The input matrix V is symmetric (V[i,j] = V[j,i]).
  198. \param a = array containing a symmetric input matrix. This is converted to the inverse matrix.
  199. \param n = dimension of the system (dim(v)=n*n)
  200. \return: 0 -> normal exit 1 -> input matrix not positive definite
  201. */
  202. int G_math_psinv(double **a,int n)
  203. {
  204. return psinv( a[0], n);
  205. }
  206. /**
  207. \brief Invert an upper right triangular matrix T -> Inv(T).
  208. \param a = pointer to array of upper right triangular matrix, This is replaced by the inverse matrix.
  209. \param n = dimension (dim(a)=n*n)
  210. \return value: status flag, with 0 -> matrix inverted -1 -> matrix singular
  211. */
  212. int G_math_ruinv(double **a,int n)
  213. {
  214. return ruinv(a[0], n);
  215. }
  216. /*
  217. -----------------------------------------------------------------------------
  218. Symmetric Eigensystem Analysis:
  219. -----------------------------------------------------------------------------
  220. */
  221. /**
  222. \brief Compute the eigenvalues of a real symmetric matrix A.
  223. \param a = pointer to array of symmetric n by n input matrix A. The computation alters these values.
  224. \param ev = pointer to array of the output eigenvalues
  225. \param n = dimension parameter (dim(a)= n*n, dim(ev)= n)
  226. */
  227. void G_math_eigval(double **a,double *ev,int n)
  228. {
  229. eigval(a[0], ev, n);
  230. return;
  231. }
  232. /**
  233. \brief Compute the eigenvalues and eigenvectors of a real symmetric matrix A.
  234. The input and output matrices are related by
  235. A = E*D*E~ where D is the diagonal matrix of eigenvalues
  236. D[i,j] = ev[i] if i=j and 0 otherwise.
  237. The columns of E are the eigenvectors.
  238. \param a = pointer to store for symmetric n by n input matrix A. The computation overloads this with an orthogonal matrix of eigenvectors E.
  239. \param ev = pointer to the array of the output eigenvalues
  240. \param n = dimension parameter (dim(a)= n*n, dim(ev)= n)
  241. */
  242. void G_math_eigen(double **a,double *ev,int n)
  243. {
  244. eigen(a[0], ev, n);
  245. return;
  246. }
  247. /*
  248. \brief Compute the maximum (absolute) eigenvalue and corresponding eigenvector of a real symmetric matrix A.
  249. \param a = array containing symmetric input matrix A
  250. \param u = array containing the n components of the eigenvector at exit (vector normalized to 1)
  251. \param n = dimension of system
  252. \return: ev = eigenvalue of A with maximum absolute value HUGE -> convergence failure
  253. */
  254. double G_math_evmax(double **a,double *u,int n)
  255. {
  256. return evmax(a[0], u, n);
  257. }
  258. /*
  259. ------------------------------------------------------------------------------
  260. Singular Value Decomposition:
  261. ------------------------------------------------------------------------------
  262. A number of versions of the Singular Value Decomposition (SVD)
  263. are implemented in the library. They support the efficient
  264. computation of this important factorization for a real m by n
  265. matrix A. The general form of the SVD is
  266. A = U*S*V~ with S = | D |
  267. | 0 |
  268. where U is an m by m orthogonal matrix, V is an n by n orthogonal matrix,
  269. D is the n by n diagonal matrix of singular value, and S is the singular
  270. m by n matrix produced by the transformation.
  271. The singular values computed by these functions provide important
  272. information on the rank of the matrix A, and on several matrix
  273. norms of A. The number of non-zero singular values d[i] in D
  274. equal to the rank of A. The two norm of A is
  275. ||A|| = max(d[i]) , and the condition number is
  276. k(A) = max(d[i])/min(d[i]) .
  277. The Frobenius norm of the matrix A is
  278. Fn(A) = Sum(i=0 to n-1) d[i]^2 .
  279. Singular values consistent with zero are easily recognized, since
  280. the decomposition algorithms have excellent numerical stability.
  281. The value of a 'zero' d[i] is no larger than a few times the
  282. computational rounding error e.
  283. The matrix U1 is formed from the first n orthonormal column vectors
  284. of U. U1[i,j] = U[i,j] for i = 1 to m and j = 1 to n. A singular
  285. value decomposition of A can also be expressed in terms of the m by
  286. n matrix U1, with
  287. A = U1*D*V~ .
  288. SVD functions with three forms of output are provided. The first
  289. form computes only the singular values, while the second computes
  290. the singular values and the U and V orthogonal transformation
  291. matrices. The third form of output computes singular values, the
  292. V matrix, and saves space by overloading the input array with
  293. the U1 matrix.
  294. Two forms of decomposition algorithm are available for each of the
  295. three output types. One is computationally efficient when m ~ n.
  296. The second, distinguished by the prefix 'sv2' in the function name,
  297. employs a two stage Householder reduction to accelerate computation
  298. when m substantially exceeds n. Use of functions of the second form
  299. is recommended for m > 2n.
  300. Singular value output from each of the six SVD functions satisfies
  301. d[i] >= 0 for i = 0 to n-1.
  302. -------------------------------------------------------------------------------
  303. */
  304. /**
  305. \brief Compute the singular values of a real m by n matrix A.
  306. \param d = pointer to double array of dimension n (output = singular values of A)
  307. \param a = pointer to store of the m by n input matrix A (A is altered by the computation)
  308. \param m = number of rows in A
  309. \param n = number of columns in A (m>=n required)
  310. \return value: status flag with: 0 -> success -1 -> input error m < n
  311. */
  312. int G_math_svdval(double *d,double **a,int m,int n)
  313. {
  314. return svdval(d, a[0], m, n);
  315. }
  316. /**
  317. \brief Compute singular values when m >> n.
  318. \param d = pointer to double array of dimension n (output = singular values of A)
  319. \param a = pointer to store of the m by n input matrix A (A is altered by the computation)
  320. \param m = number of rows in A
  321. \param n = number of columns in A (m>=n required)
  322. \return value: status flag with: 0 -> success -1 -> input error m < n
  323. */
  324. int G_math_sv2val(double *d,double **a,int m,int n)
  325. {
  326. return sv2val(d, a[0], m, n);
  327. }
  328. /*
  329. \brief Compute the singular value transformation S = U~*A*V.
  330. \param d = pointer to double array of dimension n (output = singular values of A)
  331. \param a = pointer to store of the m by n input matrix A (A is altered by the computation)
  332. \param u = pointer to store for m by m orthogonal matrix U
  333. \param v = pointer to store for n by n orthogonal matrix V
  334. \param m = number of rows in A
  335. \param n = number of columns in A (m>=n required)
  336. \return value: status flag with: 0 -> success -1 -> input error m < n
  337. */
  338. int G_math_svduv(double *d,double **a,double **u,int m,double **v,int n)
  339. {
  340. return svduv(d, a[0], u[0], m, v[0], n);
  341. }
  342. /**
  343. \brief Compute the singular value transformation when m >> n.
  344. \param d = pointer to double array of dimension n (output = singular values of A)
  345. \param a = pointer to store of the m by n input matrix A (A is altered by the computation)
  346. \param u = pointer to store for m by m orthogonal matrix U
  347. \param v = pointer to store for n by n orthogonal matrix V
  348. \param m = number of rows in A
  349. \param n = number of columns in A (m>=n required)
  350. \return value: status flag with: 0 -> success -1 -> input error m < n
  351. */
  352. int G_math_sv2uv(double *d,double **a,double **u,int m,double **v,int n)
  353. {
  354. return sv2uv(d, a[0], u[0], m, v[0], n);
  355. }
  356. /**
  357. \brief Compute the singular value transformation with A overloaded by the partial U-matrix.
  358. \param d = pointer to double array of dimension n
  359. (output = singular values of A)
  360. \param a = pointer to store of the m by n input matrix A (At output a is overloaded by the matrix U1 whose n columns are orthogonal vectors equal to the first n columns of U.)
  361. \param v = pointer to store for n by n orthogonal matrix V
  362. \param m = number of rows in A
  363. \param n = number of columns in A (m>=n required)
  364. \return value: status flag with: 0 -> success -1 -> input error m < n
  365. */
  366. int G_math_svdu1v(double *d,double **a,int m,double **v,int n)
  367. {
  368. return svdu1v(d, a[0], m, v[0], n);
  369. }