GPUMLib  0.2.2
GPU Machine Learning Library
SVM Class Reference

Represents an SVM which can be used to train and classify datasets on the GPU device. More...

#include <SVM.h>

Public Member Functions

 SVM ()
 Constructor to represent an SVM on the GPU. Used as a placeholder to execute device code.
 
int getSupportVectorIndices (GPUMLib::HostArray< cudafloat > &h_alphas, int *alpha_indices, int size)
 
int findMinimumPositionTarget_HostArray (GPUMLib::HostArray< int > &array, int array_length, int target)
 
void calculateB_1stPass (cudaStream_t stream, int blocks, int blockSize, cudafloat *offsets, cudafloat *results, int n_svs)
 
void calculateB_FinalPass (cudaStream_t stream, int blockSize, cudafloat *input_floats, int input_size)
 
void kernelFirstOrderHeuristic1stPass (cudaStream_t stream, int blocks, int blockSize, cudafloat *f, cudafloat *alphas, int *y, cudafloat *minimuns, int *min_indices, cudafloat *maximuns, int *max_indices, int input_size, cudafloat constant_epsilon, cudafloat constant_c)
 
void kernelFirstOrderHeuristicFinalPass (cudaStream_t stream, int blockSize, cudafloat *minimuns_input, int *min_indices_input, cudafloat *maximuns_input, int *max_indices_input, int input_size)
 
void updateAlphas (cudaStream_t stream, GPUMLib::svm_kernel_type kernel_type, GPUMLib::DeviceMatrix< cudafloat > &d_x, GPUMLib::DeviceArray< cudafloat > &d_alphas, GPUMLib::DeviceArray< int > &d_y, cudafloat constant_c_negative, cudafloat constant_c_positive, GPUMLib::DeviceArray< cudafloat > &d_kernel_args, int training_dataset_size, int ndims)
 
void updateKKTConditions (cudaStream_t stream, GPUMLib::svm_kernel_type kernel_type, int n_blocks, int blocksize, GPUMLib::DeviceArray< cudafloat > &d_f, GPUMLib::DeviceArray< int > &d_y, GPUMLib::DeviceMatrix< cudafloat > &d_x, GPUMLib::DeviceArray< cudafloat > &d_kernel_args, int training_dataset_size, int ndims)
 
void checkCUDA_Errors ()
 
void runSMO (GPUMLib::HostMatrix< cudafloat > &h_x, GPUMLib::HostArray< int > &h_y, GPUMLib::DeviceArray< cudafloat > &d_alphas, cudafloat constant_c_negative, cudafloat constant_c_positive, cudafloat constant_epsilon, cudafloat constant_tau, GPUMLib::svm_kernel_type kernel_type, cudafloat *kernel_args, int amount_threads)
 
void train (GPUMLib::HostMatrix< cudafloat > &h_samples, GPUMLib::HostArray< int > &h_classes, cudafloat constant_c_negative, cudafloat constant_c_positive, cudafloat constant_epsilon, cudafloat constant_tau, svm_kernel_type kernel_type, cudafloat *kernel_args, int amount_threads, GPUMLib::HostArray< cudafloat > &h_alphas, int &n_sv, GPUMLib::HostMatrix< cudafloat > &h_model, cudafloat &h_b)
 
void classify (GPUMLib::HostMatrix< cudafloat > &h_model, GPUMLib::HostMatrix< cudafloat > &h_testing_samples, cudafloat *kernel_args, int amount_threads, GPUMLib::svm_kernel_type kernel_type, int n_sv, cudafloat h_b, int ndims, GPUMLib::HostArray< int > &h_testing_results)
 

Detailed Description

Represents an SVM which can be used to train and classify datasets on the GPU device.

Definition at line 48 of file SVM.h.
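
A minimal usage sketch is shown below. Only train and classify and their parameter lists come from this page; the placement of the SVM class in the GPUMLib namespace, the HostMatrix/HostArray size-based constructors, the Rows()/Columns() accessors, the model pre-sizing and the numeric hyper-parameter values are assumptions made for illustration.

    #include <SVM.h>

    // Minimal sketch: train on host data, then classify new samples.
    void trainAndClassify(GPUMLib::HostMatrix<cudafloat> &samples,       // one row per training sample
                          GPUMLib::HostArray<int> &classes,              // +1 / -1 class of each sample
                          GPUMLib::HostMatrix<cudafloat> &test_samples,  // samples to classify
                          GPUMLib::svm_kernel_type kernel_type,          // e.g. a gaussian or linear kernel
                          cudafloat *kernel_args) {                      // kernel arguments (e.g. gamma)
        GPUMLib::SVM svm;

        int n_sv = 0;
        cudafloat b = 0;
        GPUMLib::HostArray<cudafloat> alphas(samples.Rows());            // one alpha per training sample (assumed ctor)
        GPUMLib::HostMatrix<cudafloat> model(samples.Rows(),
                                             samples.Columns() + 1);     // assumed layout: attributes + alpha per SV

        // Train: runs SMO internally and extracts the Support Vectors into model.
        svm.train(samples, classes,
                  1.0f, 1.0f,          // constant_c_negative, constant_c_positive (illustrative values)
                  0.001f, 0.001f,      // constant_epsilon, constant_tau (illustrative values)
                  kernel_type, kernel_args,
                  128,                 // amount_threads per CUDA block
                  alphas, n_sv, model, b);

        // Classify: one predicted class per testing sample is written to results.
        GPUMLib::HostArray<int> results(test_samples.Rows());            // assumed ctor
        svm.classify(model, test_samples, kernel_args, 128, kernel_type,
                     n_sv, b, samples.Columns(), results);
    }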

Member Function Documentation

void calculateB_1stPass ( cudaStream_t  stream,
int  blocks,
int  blockSize,
cudafloat *  offsets,
cudafloat *  results,
int  n_svs 
)
inline

Performs the first reduction pass (using multiple blocks) to compute the hyperplane's offset (b); essentially a sum reduction.

Parameters
stream  A cudaStream_t context to associate this execution with
blocks  The number of CUDA blocks
blockSize  The size of each CUDA block (must be a power of 2)
offsets  The array with the offsets (the real output of the SVM classification without the sgn(...) operator) of each Support Vector
results  The output array where to reduce (sum) the input offsets, where each index corresponds to an individual block
n_svs  The size of the offsets array (number of SVs)

Definition at line 101 of file SVM.h.
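
The two-pass scheme can be pictured as a standard shared-memory sum reduction: each block of the first pass folds a slice of offsets into one partial sum (one entry of results per block), and calculateB_FinalPass then repeats the pattern with a single block over those partial sums. A hedged sketch of such a reduction kernel (not the actual kernel in SVM.h):

    // Illustrative block-wise sum reduction in the style described above.
    // blockSize must be a power of 2, matching the requirement documented here.
    template <int blockSize>
    __global__ void sumReduceSketch(const cudafloat *in, cudafloat *out, int n) {
        __shared__ cudafloat partial[blockSize];

        int tid = threadIdx.x;

        // Grid-stride accumulation: each thread folds several inputs into one value.
        cudafloat sum = 0;
        for (int i = blockIdx.x * blockSize + tid; i < n; i += blockSize * gridDim.x)
            sum += in[i];
        partial[tid] = sum;
        __syncthreads();

        // Shared-memory tree reduction.
        for (int s = blockSize / 2; s > 0; s >>= 1) {
            if (tid < s) partial[tid] += partial[tid + s];
            __syncthreads();
        }

        if (tid == 0) out[blockIdx.x] = partial[0];   // one partial sum per block
    }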

void calculateB_FinalPass ( cudaStream_t  stream,
int  blockSize,
cudafloat *  input_floats,
int  input_size 
)
inline

Performs the last reduction pass (using one block) to compute the hyperplane's offset (b); essentially a sum reduction.

Parameters
stream  A cudaStream_t context to associate this execution with
blockSize  The size of each CUDA block (must be a power of 2)
input_floats  The array with the offsets (the real output of the SVM classification without the sgn(...) operator) of each Support Vector, resulting from the first pass
input_size  The size of the offsets array (number of blocks in the first pass)

Definition at line 138 of file SVM.h.

void checkCUDA_Errors ( )
inline

Checks for CUDA errors that occurred before this call. If an error occurred, it is printed to stdout.

Definition at line 380 of file SVM.h.
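
A check of this kind is typically built on the CUDA runtime API; a minimal sketch under that assumption (the real implementation is in SVM.h):

    #include <cstdio>
    #include <cuda_runtime.h>

    // Sketch: report any error raised by earlier kernel launches or API calls.
    void checkCUDAErrorsSketch() {
        cudaError_t error = cudaGetLastError();
        if (error != cudaSuccess)
            printf("CUDA error: %s\n", cudaGetErrorString(error));
    }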

void classify ( GPUMLib::HostMatrix< cudafloat > &  h_model,
GPUMLib::HostMatrix< cudafloat > &  h_testing_samples,
cudafloat *  kernel_args,
int  amount_threads,
GPUMLib::svm_kernel_type  kernel_type,
int  n_sv,
cudafloat  h_b,
int  ndims,
GPUMLib::HostArray< int > &  h_testing_results 
)
inline

Launches the SVM classification algorithm.

Parameters
h_model  The HostMatrix containing the SVM model (samples and alphas for each one)
h_testing_samples  A HostMatrix containing the samples/patterns to be classified
kernel_args  The array listing the arguments for the given kernel
amount_threads  The maximum number of threads used in each CUDA block
kernel_type  The svm_kernel_type (enum) of the kernel used (gaussian, linear, ukf, etc.)
n_sv  The number of Support Vectors (samples with alphas > 0) in the model
h_b  The hyperplane's bias (offset)
ndims  The number of dimensions in the training set (size of each sample)
h_testing_results  The HostArray where to store the classification results.

Definition at line 737 of file SVM.h.

int findMinimumPositionTarget_HostArray ( GPUMLib::HostArray< int > &  array,
int  array_length,
int  target 
)
inline

Finds the minimum (first) position in the array where the target (int) occurs

Parameters
array  The HostArray where to search
array_length  The length of the array (number of elements)
target  The value to find in the given array
Returns
The position of the given target in the array if found, or -1 if not found

Definition at line 80 of file SVM.h.
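
Functionally this is a plain linear scan over the host array; a sketch of the documented behaviour (assuming HostArray provides operator[], which is not documented on this page):

    // Sketch of the documented behaviour, not the library source.
    int findFirstPositionSketch(GPUMLib::HostArray<int> &array, int array_length, int target) {
        for (int i = 0; i < array_length; i++)
            if (array[i] == target)
                return i;        // first (minimum) position where target occurs
        return -1;               // target not present in the array
    }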

int getSupportVectorIndices ( GPUMLib::HostArray< cudafloat > &  h_alphas,
int *  alpha_indices,
int  size 
)
inline

Gets the indices of the non-zero alphas (Support Vectors)

Parameters
h_alphas  A HostArray containing the alphas
alpha_indices  An array of integers where to store the indices of the non-zero alphas
size  The size of the input alphas array (i.e., the number of samples)
Returns
The number of non-zero alphas (Support Vectors) found

Definition at line 61 of file SVM.h.
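
The documented behaviour amounts to collecting the positions of the non-zero alphas; a sketch (again assuming HostArray provides operator[]):

    // Sketch of the documented behaviour, not the library source.
    int supportVectorIndicesSketch(GPUMLib::HostArray<cudafloat> &h_alphas, int *alpha_indices, int size) {
        int n_sv = 0;
        for (int i = 0; i < size; i++)
            if (h_alphas[i] > 0)              // non-zero alpha => Support Vector
                alpha_indices[n_sv++] = i;
        return n_sv;                          // number of Support Vectors found
    }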

void kernelFirstOrderHeuristic1stPass ( cudaStream_t  stream,
int  blocks,
int  blockSize,
cudafloat *  f,
cudafloat *  alphas,
int *  y,
cudafloat *  minimuns,
int *  min_indices,
cudafloat *  maximuns,
int *  max_indices,
int  input_size,
cudafloat  constant_epsilon,
cudafloat  constant_c 
)
inline

Performs the first pass of the first-order alpha search heuristic

Parameters
stream  A cudaStream_t context to associate this execution with
blocks  The number of CUDA blocks
blockSize  The size of each CUDA block (must be a power of 2)
y  The array containing the classes, one for each sample
f  The array of optimality conditions for each sample
alphas  The array of alphas (Lagrange multipliers)
minimuns  The array where to store the minimum f value found in each CUDA block
min_indices  The array where to store the position, in f, of the minimum found by each block
maximuns  The array where to store the maximum f value found in each CUDA block
max_indices  The array where to store the position, in f, of the maximum found by each block
input_size  The size of the array f, which is equal to the number of samples
constant_epsilon  The epsilon tolerance used in the first-order search heuristic
constant_c  The penalization constant.

Definition at line 196 of file SVM.h.
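
For reference, the first-order (maximal-violating-pair) selection that a two-pass reduction like this typically implements can be written as below. This is the textbook formulation (with the index sets tested up to the epsilon tolerance), not a transcription of SVM.h:

    b_{high} = \min_{i \in I_{high}} f_i , \qquad b_{low} = \max_{i \in I_{low}} f_i

    I_{high} = \{ i : 0 < \alpha_i < C \} \cup \{ i : y_i = +1,\ \alpha_i = 0 \} \cup \{ i : y_i = -1,\ \alpha_i = C \}
    I_{low}  = \{ i : 0 < \alpha_i < C \} \cup \{ i : y_i = +1,\ \alpha_i = C \} \cup \{ i : y_i = -1,\ \alpha_i = 0 \}

Each block of this first pass reduces its slice of f under these eligibility tests; the final pass below then reduces the per-block candidates to the global pair, and training proceeds while the gap between b_low and b_high exceeds the constant_tau threshold used by runSMO (commonly tested as b_low > b_high + 2*tau).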

void kernelFirstOrderHeuristicFinalPass ( cudaStream_t  stream,
int  blockSize,
cudafloat *  minimuns_input,
int *  min_indices_input,
cudafloat *  maximuns_input,
int *  max_indices_input,
int  input_size 
)
inline

Performs the last pass of the first-order alpha search heuristic. This function essentially performs a simultaneous max/min reduction. The results are stored in the device variables d_b_high, d_i_high, d_b_low and d_i_low.

Parameters
stream  A cudaStream_t context to associate this execution with
blockSize  The size of each CUDA block (must be a power of 2)
minimuns_input  The array containing the values to search for a minimum
min_indices_input  The positions, in the optimality array, of the minima found in the previous pass
maximuns_input  The array containing the values to search for a maximum
max_indices_input  The positions, in the optimality array, of the maxima found in the previous pass
input_size  The size of the input arrays, which is equal to the number of blocks in the previous pass

Definition at line 252 of file SVM.h.
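
The simultaneous max/min reduction follows the same shared-memory pattern as the sum reduction sketched for calculateB_1stPass, except that an index travels along with each value. An illustrative single-block version (a sketch, not the kernel in SVM.h; it assumes input_size <= blockSize and that cudafloat is single precision):

    #include <cfloat>

    // Illustrative simultaneous min/max reduction with index tracking.
    // blockSize must be a power of 2 and at least n (the number of first-pass blocks).
    template <int blockSize>
    __global__ void minMaxFinalPassSketch(const cudafloat *mins, const int *minIdx,
                                          const cudafloat *maxs, const int *maxIdx, int n) {
        __shared__ cudafloat sMin[blockSize], sMax[blockSize];
        __shared__ int sMinI[blockSize], sMaxI[blockSize];

        int tid = threadIdx.x;

        // Load one candidate per thread; out-of-range slots get neutral sentinels.
        sMin[tid]  = tid < n ? mins[tid]   :  FLT_MAX;
        sMinI[tid] = tid < n ? minIdx[tid] : -1;
        sMax[tid]  = tid < n ? maxs[tid]   : -FLT_MAX;
        sMaxI[tid] = tid < n ? maxIdx[tid] : -1;
        __syncthreads();

        // Tree reduction, carrying the index of the winning value at each step.
        for (int s = blockSize / 2; s > 0; s >>= 1) {
            if (tid < s) {
                if (sMin[tid + s] < sMin[tid]) { sMin[tid] = sMin[tid + s]; sMinI[tid] = sMinI[tid + s]; }
                if (sMax[tid + s] > sMax[tid]) { sMax[tid] = sMax[tid + s]; sMaxI[tid] = sMaxI[tid + s]; }
            }
            __syncthreads();
        }

        // Thread 0 now holds the global extrema and their indices; the real kernel
        // stores these in the device variables d_b_high, d_i_high, d_b_low, d_i_low.
    }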

void runSMO ( GPUMLib::HostMatrix< cudafloat > &  h_x,
GPUMLib::HostArray< int > &  h_y,
GPUMLib::DeviceArray< cudafloat > &  d_alphas,
cudafloat  constant_c_negative,
cudafloat  constant_c_positive,
cudafloat  constant_epsilon,
cudafloat  constant_tau,
GPUMLib::svm_kernel_type  kernel_type,
cudafloat *  kernel_args,
int  amount_threads 
)
inline

Launches the SMO training algorithm

Parameters
h_x  A HostMatrix containing the training samples/patterns
h_y  A HostArray containing the classes of the training samples
d_alphas  A DeviceArray where to store the alpha/Lagrange multiplier associated with each sample
constant_c_negative  The penalization constant associated with the negative class (also used for the positive class)
constant_c_positive  The penalization constant associated with the positive class (not used)
constant_epsilon  The epsilon tolerance used in the first-order search heuristic
constant_tau  The threshold used for checking the duality gap/convergence
kernel_type  The svm_kernel_type (enum) of the kernel used (gaussian, linear, ukf, etc.)
kernel_args  The array listing the arguments for the given kernel
amount_threads  The maximum number of threads used in each CUDA block

Definition at line 404 of file SVM.h.

void train ( GPUMLib::HostMatrix< cudafloat > &  h_samples,
GPUMLib::HostArray< int > &  h_classes,
cudafloat  constant_c_negative,
cudafloat  constant_c_positive,
cudafloat  constant_epsilon,
cudafloat  constant_tau,
svm_kernel_type  kernel_type,
cudafloat *  kernel_args,
int  amount_threads,
GPUMLib::HostArray< cudafloat > &  h_alphas,
int &  n_sv,
GPUMLib::HostMatrix< cudafloat > &  h_model,
cudafloat &  h_b 
)
inline

Launches the SVM training algorithm, internally calling the SMO solver and returning the array of Support Vectors.

Parameters
h_samples  A HostMatrix containing the training samples/patterns
h_classes  A HostArray containing the training samples' classes
constant_c_negative  The penalization constant associated with the negative class (also used for the positive class)
constant_c_positive  The penalization constant associated with the positive class (not used)
constant_epsilon  The epsilon tolerance used in the first-order search heuristic
constant_tau  The threshold used for checking the duality gap/convergence
kernel_type  The svm_kernel_type (enum) of the kernel used (gaussian, linear, ukf, etc.)
kernel_args  The array listing the arguments for the given kernel
amount_threads  The maximum number of threads used in each CUDA block
h_alphas  The HostArray where to store the alphas for each sample after the training process
n_sv  The number of Support Vectors (samples with alphas > 0)
h_model  The HostMatrix containing the SVM model (samples and alphas for each one)
h_b  The hyperplane's bias (offset)

Definition at line 604 of file SVM.h.

void updateAlphas ( cudaStream_t  stream,
GPUMLib::svm_kernel_type  kernel_type,
GPUMLib::DeviceMatrix< cudafloat > &  d_x,
GPUMLib::DeviceArray< cudafloat > &  d_alphas,
GPUMLib::DeviceArray< int > &  d_y,
cudafloat  constant_c_negative,
cudafloat  constant_c_positive,
GPUMLib::DeviceArray< cudafloat > &  d_kernel_args,
int  training_dataset_size,
int  ndims 
)
inline

Updates the alphas stored in the device's memory (old/new alphas for both the low and high indices). Currently only the negative C constant is used for both classes.

Parameters
stream  A cudaStream_t context to associate this execution with
kernel_type  The svm_kernel_type (enum) of the kernel used (gaussian, linear, ukf, etc.)
d_x  The DeviceMatrix containing the attributes for each sample
d_alphas  The DeviceArray containing the alpha associated with each training sample
d_y  The DeviceArray containing the array of classes, one for each training sample
constant_c_negative  The penalization constant associated with the negative class
constant_c_positive  The penalization constant associated with the positive class
d_kernel_args  The DeviceArray listing the arguments for the given kernel
training_dataset_size  The number of samples used in the training process
ndims  The number of attributes/features.

Definition at line 307 of file SVM.h.
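
For context, the textbook SMO update that a routine like this performs on the selected pair (i_high, i_low), using the kernel function K(., .), is shown below; treat it as the standard formulation rather than a guaranteed transcription of the code:

    \eta = K(x_{high}, x_{high}) + K(x_{low}, x_{low}) - 2\,K(x_{high}, x_{low})

    \alpha_{low}'  = \alpha_{low}  + y_{low}\,\frac{b_{high} - b_{low}}{\eta}
    \alpha_{high}' = \alpha_{high} + y_{low}\,y_{high}\,(\alpha_{low} - \alpha_{low}')

with both new values clipped to [0, C] (here the negative-class C for both classes, as noted above).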

void updateKKTConditions ( cudaStream_t  stream,
GPUMLib::svm_kernel_type  kernel_type,
int  n_blocks,
int  blocksize,
GPUMLib::DeviceArray< cudafloat > &  d_f,
GPUMLib::DeviceArray< int > &  d_y,
GPUMLib::DeviceMatrix< cudafloat > &  d_x,
GPUMLib::DeviceArray< cudafloat > &  d_kernel_args,
int  training_dataset_size,
int  ndims 
)
inline

Updates the KKT conditions.

Parameters
stream  A cudaStream_t context to associate this execution with
kernel_type  The svm_kernel_type (enum) of the kernel used (gaussian, linear, ukf, etc.)
n_blocks  The number of CUDA blocks to execute
blocksize  The number of threads in each block
d_f  The array where to update the KKT conditions
d_y  The DeviceArray containing the array of classes, one for each training sample
d_x  The DeviceMatrix containing the attributes for each sample
d_kernel_args  The DeviceArray listing the arguments for the given kernel
training_dataset_size  The number of samples used in the training process
ndims  The number of attributes/features.

Definition at line 354 of file SVM.h.
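
In the standard SMO formulation the optimality ("KKT") values f are refreshed after each alpha update as below, for every sample i; again, this is the usual expression, not necessarily the exact code in SVM.h:

    f_i \leftarrow f_i + (\alpha_{high}' - \alpha_{high})\,y_{high}\,K(x_{high}, x_i) + (\alpha_{low}' - \alpha_{low})\,y_{low}\,K(x_{low}, x_i)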


The documentation for this class was generated from the following file:

SVM.h