[opengm] 21/386: now this solver does grid search learning (todo: rename file)
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:35:00 UTC 2016
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch debian/master
in repository opengm.
commit 61116cd31febf836f7abb4e0f45ce658dce81ec6
Author: joergkappes <kappes at math.uni-heidelberg.de>
Date: Wed Nov 19 15:40:29 2014 +0100
now this solver does grid search learning (todo: rename file)
---
include/opengm/learning/randomlearner.hxx | 102 ++++++++++++++++++++----------
1 file changed, 69 insertions(+), 33 deletions(-)
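[Editor's note] The patch replaces the random parameter sampling with an exhaustive grid search: each model parameter gets testingPoints_[p] evenly spaced values between its lower and upper bound, and the itC counter vector is incremented like an odometer so every combination is visited exactly once. The following is a minimal, self-contained sketch of that enumeration; the names lower, upper and points are illustrative stand-ins, not opengm API.

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    // Illustrative bounds and grid resolution for two parameters
    // (mirrors parameterLowerbound_, parameterUpperbound_, testingPoints_).
    std::vector<double>      lower  = {0.0, 0.0};
    std::vector<double>      upper  = {10.0, 10.0};
    std::vector<std::size_t> points = {5, 5};

    std::vector<std::size_t> itC(lower.size(), 0);  // per-parameter grid index
    bool search = true;
    while (search) {
        // Map the current grid indices to parameter values.
        for (std::size_t p = 0; p < lower.size(); ++p) {
            const double t = double(itC[p]) / double(points[p] - 1);
            std::cout << lower[p] + t * (upper[p] - lower[p]) << " ";
        }
        std::cout << "\n";

        // Odometer-style increment: bump the first index that still has room,
        // reset the ones before it, and stop after the last combination.
        for (std::size_t p = 0; p < lower.size(); ++p) {
            if (itC[p] < points[p] - 1) {
                ++itC[p];
                break;
            }
            itC[p] = 0;
            if (p == lower.size() - 1)
                search = false;
        }
    }
}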
diff --git a/include/opengm/learning/randomlearner.hxx b/include/opengm/learning/randomlearner.hxx
index 7bb211c..353814b 100644
--- a/include/opengm/learning/randomlearner.hxx
+++ b/include/opengm/learning/randomlearner.hxx
@@ -1,6 +1,6 @@
#pragma once
-#ifndef OPENGM_RANDOM_LEARNER_HXX
-#define OPENGM_RANDOM_LEARNER_HXX
+#ifndef OPENGM_GRIDSEARCH_LEARNER_HXX
+#define OPENGM_GRIDSEARCH_LEARNER_HXX
#include <vector>
#include <opengm/functions/learnablefunction.hxx>
@@ -9,22 +9,24 @@ namespace opengm {
namespace learning {
template<class DATASET, class LOSS>
- class RandomLearner
+ class GridSearchLearner
{
public:
- typedef opengm::GraphicalModel<double,opengm::Adder,typename opengm::meta::TypeListGenerator<opengm::ExplicitFunction<double>, opengm::functions::learnable::LPotts<double> >::type, opengm::DiscreteSpace<size_t, size_t> >GMType; // This will be constructed as a combination of DATASET and LOSS (especially the functiontypelist
-
+ typedef typename DATASET::GMType GMType;
+ typedef typename GMType::ValueType ValueType;
+ typedef typename GMType::IndexType IndexType;
+ typedef typename GMType::LabelType LabelType;
class Parameter{
public:
std::vector<double> parameterUpperbound_;
std::vector<double> parameterLowerbound_;
- size_t iterations_;
- Parameter():iterations_(10){;}
+ std::vector<size_t> testingPoints_;
+ Parameter(){;}
};
- RandomLearner(DATASET&, Parameter& );
+ GridSearchLearner(DATASET&, Parameter& );
template<class INF>
void learn(typename INF::Parameter& para);
@@ -41,47 +43,81 @@ namespace opengm {
};
template<class DATASET, class LOSS>
- RandomLearner<DATASET, LOSS>::RandomLearner(DATASET& ds, Parameter& p )
+ GridSearchLearner<DATASET, LOSS>::GridSearchLearner(DATASET& ds, Parameter& p )
: dataset_(ds), para_(p)
{
- modelParameters_ = opengm::Parameters<double,size_t>(ds.numberOfParameters());
- if(para_.parameterUpperbound_ != ds.numberOfParameters())
- para_.parameterUpperbound_.resize(ds.numberOfParameters(),1000.0);
- if(para_.parameterLowerbound_ != ds.numberOfParameters())
- para_.parameterLowerbound_.resize(ds.numberOfParameters(),-1000.0);
+ modelParameters_ = opengm::Parameters<double,size_t>(ds.getNumberOfParameters());
+ if(para_.parameterUpperbound_.size() != ds.getNumberOfParameters())
+ para_.parameterUpperbound_.resize(ds.getNumberOfParameters(),10.0);
+ if(para_.parameterLowerbound_.size() != ds.getNumberOfParameters())
+ para_.parameterLowerbound_.resize(ds.getNumberOfParameters(),0.0);
+ if(para_.testingPoints_.size() != ds.getNumberOfParameters())
+ para_.testingPoints_.resize(ds.getNumberOfParameters(),10);
}
template<class DATASET, class LOSS>
template<class INF>
- void RandomLearner<DATASET, LOSS>::learn(typename INF::Parameter& para){
+ void GridSearchLearner<DATASET, LOSS>::learn(typename INF::Parameter& para){
// generate model Parameters
- std::vector< opengm::Parameters<double,size_t> > paras(para_.iterations_, opengm::Parameters<double,size_t>( dataset_.numberOfParameters()));
- std::vector< double > loss(para_.iterations_,0);
+ opengm::Parameters<double,size_t> modelPara( dataset_.getNumberOfParameters() );
+ opengm::Parameters<double,size_t> bestModelPara( dataset_.getNumberOfParameters() );
+ double bestLoss = 100000000.0;
+ std::vector<size_t> itC(dataset_.getNumberOfParameters(),0);
- for(size_t i=0;i<para_.iterations_;++i){
- // following is a very stupid parameter selection and not usefull with more than 1 parameter
- for(size_t p=0; p< dataset_.numberOfParameters(); ++p){
- paras[i][p] = para_.parameterLowerbound_[p] + double(i)/double(para_.iterations_)*(para_.parameterUpperbound_[p]-para_.parameterLowerbound_[p]);
- }
- }
LOSS lossFunction;
- size_t best = 0;
- for(size_t i=0;i<para_.iterations_;++i){
- opengm::Parameters<double,size_t> mp = dataset_.getModelParameter();
- mp = paras[i];
- std::vector< std::vector<typename INF::LabelType> > confs( dataset_.numberOfModels() );
- for(size_t m=0; m<dataset_.numberOfModels(); ++m){
+ bool search=true;
+ while(search){
+ // Get Parameter
+ for(size_t p=0; p<dataset_.getNumberOfParameters(); ++p){
+ modelPara.setParameter(p, para_.parameterLowerbound_[p] + double(itC[p])/double(para_.testingPoints_[p]-1)*(para_.parameterUpperbound_[p]-para_.parameterLowerbound_[p]) );
+ }
+ // Evaluate Loss
+ opengm::Parameters<double,size_t>& mp = dataset_.getModelParameters();
+ mp = modelPara;
+ std::vector< std::vector<typename INF::LabelType> > confs( dataset_.getNumberOfModels() );
+ double loss = 0;
+ for(size_t m=0; m<dataset_.getNumberOfModels(); ++m){
INF inf( dataset_.getModel(m),para);
inf.infer();
inf.arg(confs[m]);
const std::vector<typename INF::LabelType>& gt = dataset_.getGT(m);
- loss[i] += lossFunction.loss(confs[m].begin(), confs[m].end(), gt.begin(), gt.end());
+ loss += lossFunction.loss(confs[m].begin(), confs[m].end(), gt.begin(), gt.end());
+ }
+
+ // *call visitor*
+ for(size_t p=0; p<dataset_.getNumberOfParameters(); ++p){
+ std::cout << modelPara[p] <<" ";
}
- if(loss[i]<loss[best])
- best=i;
+ std::cout << " ==> ";
+ std::cout << loss << std::endl;
+ // **************
+
+ if(loss<bestLoss){
+ bestLoss=loss;
+ bestModelPara=modelPara;
+ }
+ //Increment Parameter
+ for(size_t p=0; p<dataset_.getNumberOfParameters(); ++p){
+ if(itC[p]<para_.testingPoints_[p]-1){
+ ++itC[p];
+ break;
+ }
+ else{
+ itC[p]=0;
+ if (p==dataset_.getNumberOfParameters()-1)
+ search = false;
+ }
+ }
+
+ }
+ std::cout << "Best"<<std::endl;
+ for(size_t p=0; p<dataset_.getNumberOfParameters(); ++p){
+ std::cout << bestModelPara[p] <<" ";
}
- modelParameters_ = para[best];
+ std::cout << " ==> ";
+ std::cout << bestLoss << std::endl;
+ modelParameters_ = bestModelPara;
};
}
}
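[Editor's note] For each grid point, learn() runs inference on every model in the dataset, compares the result against the ground truth with the LOSS functor, and keeps the parameter vector with the smallest accumulated loss. The sketch below illustrates that accumulation with a hypothetical Hamming-style loss standing in for the LOSS template argument; it is not taken from the opengm sources.

#include <cstddef>
#include <iostream>
#include <vector>

// Minimal stand-in for LOSS: counts disagreements between an inferred
// labeling and the ground truth labeling.
struct HammingLoss {
    template<class IT1, class IT2>
    double loss(IT1 begin, IT1 end, IT2 gtBegin, IT2 gtEnd) const {
        double l = 0;
        for (; begin != end && gtBegin != gtEnd; ++begin, ++gtBegin)
            if (*begin != *gtBegin) l += 1.0;
        return l;
    }
};

int main() {
    // Inferred labelings and ground truth for two toy "models".
    std::vector<std::vector<std::size_t> > confs = {{0, 1, 1, 0}, {2, 2, 0}};
    std::vector<std::vector<std::size_t> > gt    = {{0, 1, 0, 0}, {2, 1, 0}};

    HammingLoss lossFunction;
    double loss = 0;
    // Accumulate the loss over all models, as learn() does for one grid point.
    for (std::size_t m = 0; m < confs.size(); ++m)
        loss += lossFunction.loss(confs[m].begin(), confs[m].end(),
                                  gt[m].begin(), gt[m].end());
    std::cout << "total loss: " << loss << "\n";  // prints 2
}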
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git