// GPUMLib 0.2.2
// GPU Machine Learning Library
// utils.cpp
1 /*
2  Ricardo Quintas is an MSc Student at the University of Coimbra, Portugal
3  Copyright (C) 2009, 2010 Ricardo Quintas
4 
5  This file is part of GPUMLib.
6 
7  GPUMLib is free software: you can redistribute it and/or modify
8  it under the terms of the GNU General Public License as published by
9  the Free Software Foundation, either version 3 of the License, or
10  (at your option) any later version.
11 
12  This program is distributed in the hope that it will be useful,
13  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  GNU General Public License for more details.
16 
17  You should have received a copy of the GNU General Public License
18  along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20 
21 #include "utils.h"
22 
23 void checkStatus(culaStatus status)
24 {
25  if(!status)
26  return;
27 
28  if(status == culaArgumentError)
29  printf("Invalid value for parameter %d\n", culaGetErrorInfo());
30  else if(status == culaDataError)
31  printf("Data error (%d)\n", culaGetErrorInfo());
32  else if(status == culaBlasError)
33  printf("Blas error (%d)\n", culaGetErrorInfo());
34  else if(status == culaRuntimeError)
35  printf("Runtime error (%d)\n", culaGetErrorInfo());
36  else
37  printf("%s\n", culaGetStatusString(status));
38 
39  culaShutdown();
40  exit(EXIT_FAILURE);
41 }
42 
43 
44 namespace UTILS{
45 
46  void writeM(std::string desc, HostMatrix<float> Input){
47 
48  std::ofstream File(desc.c_str());
49 
50  File << desc << "=";
51  File << "[";
52  for(int i=0;i<Input.Rows(); i++){
53  for(int j=0;j<Input.Columns();j++){
54  File << Input(i,j) << " ";
55  }
56  if(i < Input.Rows() - 1)
57  File << ";\n";
58  }
59  File << "];";
60 
61  File.close();
62  }
63 
64  void writeM(std::string desc, DeviceMatrix<float> Mat){
65 
66 
68 
69  writeM(desc,Input);
70 
71  }
72 
	// Prints a host matrix to stdout as "desc RxC" followed by a
	// MATLAB-like "[a b ;\n c d ]" body.
	//
	// Order == false: elements are read through Input(i,j) and only the
	// first `Rows` rows are printed.
	// Order == true: the raw buffer is indexed directly as column-major
	// (Pointer()[j*Rows+i]) and ALL Input.Rows() rows are printed.
	// NOTE(review): in the Order==true branch the header uses `Rows` while
	// the loop iterates over Input.Rows() — looks inconsistent; confirm the
	// intended behavior against callers before changing.
	void printM(std::string desc, HostMatrix<float> Input, bool Order, int Rows){

		if(!Order){
			std::cout << desc << " " << Input.Rows() << "x" << Input.Columns() << std::endl;
			std::cout << "[";
			for(int i=0;i<Rows; i++){
				for(int j=0;j<Input.Columns();j++){
					std::cout << Input(i,j) << " ";
				}
				// ";" + newline between rows, not after the last one.
				if(i < Rows - 1)
					std::cout << ";" << std::endl;
			}
			std::cout << "]";
			std::cout << std::endl << std::endl;
		}else{
			std::cout << desc << " " << Rows << "x" << Input.Columns() << std::endl;
			std::cout << "[";

			for(int i=0;i<Input.Rows(); i++){
				for(int j=0;j<Input.Columns();j++){
					// Raw column-major access into the underlying buffer.
					std::cout << Input.Pointer()[j*Input.Rows()+i] << " ";
				}
				if(i < Input.Rows() - 1)
					std::cout << ";" << std::endl;
			}
			std::cout << "]";
			std::cout << std::endl << std::endl;
		}
	}
102 
103  void printM(std::string desc, HostMatrix<float> Mat, int Rows){
104  printM(desc,Mat,false,Rows);
105  }
106 
107  void printM(std::string desc, HostMatrix<float> Mat){
108  printM(desc,Mat,false,Mat.Rows());
109  }
110 
111  void printM(std::string desc, HostMatrix<float> Mat, bool Order){
112  printM(desc,Mat,Order,Mat.Rows());
113  }
114 
115  void printM(std::string desc, DeviceMatrix<float> Mat, bool Order){
117  printM(desc,Input, Order);
118  }
119 
120  void printM(std::string desc, DeviceMatrix<float> Mat, int Rows){
122  printM(desc,Input,false,Rows);
123  }
124 
125  void printA(std::string desc, HostArray<float> Array){
126 
127  std::cout << desc << " " << Array.Length() << std::endl;
128  std::cout << "[";
129  for(int i=0;i<Array.Length(); i++){
130  std::cout << Array[i] << " ";
131  if(i < Array.Length() - 1)
132  std::cout << ";";
133  }
134  std::cout << "]";
135  std::cout << std::endl << std::endl;
136 
137  }
138 
139  void printA(std::string desc, HostArray<int> Array){
140 
141  std::cout << desc << " " << Array.Length() << std::endl;
142  std::cout << "[";
143  for(int i=0;i<Array.Length(); i++){
144  std::cout << Array[i] << " ";
145  if(i < Array.Length() - 1)
146  std::cout << ";";
147  }
148  std::cout << "]";
149  std::cout << std::endl << std::endl;
150 
151  }
152 
153  void printA(std::string desc, HostArray<float> Array, int Length){
154 
155  std::cout << desc << " " << Length << std::endl;
156  std::cout << "[";
157  for(int i=0;i<Length; i++){
158  std::cout << Array[i] << " ";
159  if(i < Length - 1)
160  std::cout << ";";
161  }
162  std::cout << "]";
163  std::cout << std::endl << std::endl;
164 
165  }
166 
167  void printA(std::string desc, DeviceArray<float> Array){
168 
169  HostArray<float> Input = HostArray<float>(Array);
170 
171  printA(desc,Input);
172 
173  }
174 
175  void printA(std::string desc, DeviceArray<int> Array){
176 
177  HostArray<int> Input = HostArray<int>(Array);
178 
179  printA(desc,Input);
180 
181  }
182 
183 
	// Computes the Moore-Penrose pseudoinverse of `Input` (MxN) on the GPU
	// via singular value decomposition:
	//   (1) full SVD with CULA:            A = U * S * V^T
	//   (2) build S+ (NxM) with the KernelSigmaInverse CUDA kernel
	//   (3) two cublasSgemm calls:         A+ = V * S+ * U^T
	// Returns an NxM column-major device matrix.
	//
	// NOTE(review): LAPACK-style gesvd routines overwrite the input matrix A,
	// so Input's device buffer is presumably clobbered by culaDeviceSgesvd —
	// callers should not rely on its contents afterwards; confirm against
	// the CULA documentation.
	DeviceMatrix<float> pseudoinverse(DeviceMatrix<float> &Input){

		int M = Input.Rows();
		int N = Input.Columns();

		/* Setup SVD Parameters */
		// Leading dimensions for the column-major storage used throughout.
		int LDA = M;
		int LDU = M;
		int LDVT = N;

		// 'A' = compute all columns of U and all rows of V^T (full SVD).
		char jobu = 'A';
		char jobvt = 'A';

		DeviceMatrix<float> S(imin(M,N),1,ColumnMajor);	// singular values
		DeviceMatrix<float> U(LDU,M,ColumnMajor);
		DeviceMatrix<float> VT(LDVT,N,ColumnMajor);

		// checkStatus() terminates the process if the SVD fails.
		checkStatus(culaDeviceSgesvd(jobu, jobvt, M, N, Input.Pointer(), LDA, S.Pointer(), U.Pointer(), LDU, VT.Pointer(), LDVT));

		// S+ : NxM, zero-filled, then the kernel writes the inverted
		// singular values onto its diagonal.
		DeviceMatrix<float> d_Sigma = DeviceMatrix<float>(N,M,ColumnMajor);
		cudaMemset(d_Sigma.Pointer(),0,sizeof(float)*d_Sigma.Elements());

		KernelSigmaInverse(d_Sigma.Pointer(),d_Sigma.Rows(),d_Sigma.Columns(),S.Pointer());

		DeviceMatrix<float> sigmainverse = DeviceMatrix<float>(N,M,ColumnMajor);
		DeviceMatrix<float> aux2 = DeviceMatrix<float>(N,M,ColumnMajor);

		// aux2 = V * S+   ('t' on VT transposes it back to V).
		cublasSgemm('t', 'n', N, M, N, 1, VT.Pointer(), LDVT, d_Sigma.Pointer(), N, 0, aux2.Pointer(), N);
		// sigmainverse = aux2 * U^T  — the final pseudoinverse.
		cublasSgemm('n', 't', N, M, M, 1, aux2.Pointer(), N, U.Pointer(), M, 0, sigmainverse.Pointer(), N);

		return sigmainverse;
	}
216 
217 }
/* Doxygen cross-reference residue appended by the documentation extractor —
   not compilable source; kept verbatim for reference:

int Elements() const
Definition: BaseMatrix.h:94
#define imin(X, Y)
Functions used to create a radial basis functions network that can be trained using the CUDA implemen...
Definition: rbf.h:38
Type * Pointer() const
Definition: BaseMatrix.h:88
int Columns() const
Definition: BaseMatrix.h:80
int Length() const
Definition: BaseArray.h:63
int Rows() const
Definition: BaseMatrix.h:74
void KernelSigmaInverse(float *Output, int output_height, int output_width, cudafloat *S)
Definition: RANKernels.cu:217
*/