[opengm] 15/386: syntax checking and bugfixes

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:34:59 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 3436626f6b940c1a878a11e66f5157954d188c30
Author: joergkappes <kappes at math.uni-heidelberg.de>
Date:   Mon May 19 11:23:42 2014 +0200

    syntax checking and bugfixes
---
 include/opengm/learning/dataset/testdataset.hxx | 46 ++++++++++++-------------
 include/opengm/learning/loss/hammingloss.hxx    |  2 +-
 include/opengm/learning/randomlearner.hxx       | 43 +++++++++++++++++++----
 src/unittest/CMakeLists.txt                     |  3 ++
 src/unittest/test_randomlearner.cxx             | 21 +++++++++++
 5 files changed, 84 insertions(+), 31 deletions(-)

diff --git a/include/opengm/learning/dataset/testdataset.hxx b/include/opengm/learning/dataset/testdataset.hxx
index 5e18647..a6b6aaf 100644
--- a/include/opengm/learning/dataset/testdataset.hxx
+++ b/include/opengm/learning/dataset/testdataset.hxx
@@ -10,12 +10,12 @@ namespace opengm {
    namespace datasets{
 
       template<class GM>
-      class TestDatset<GM>{
+      class TestDataset{
       public:
-         typename typedef GM::ValueType ValueType;
-         typename typedef GM::IndexType IndexType;
-         typename typedef GM::LabelType LabelType; 
-         typename opengm::Parameters<ValueType,IndexType> ModelParameter;
+         typedef typename GM::ValueType ValueType;
+         typedef typename GM::IndexType IndexType;
+         typedef typename GM::LabelType LabelType; 
+         typedef opengm::Parameters<ValueType,IndexType> ModelParameters;
 
          const GM&                     getModel(const size_t i)  { return gms_[i]; }
          const std::vector<LabelType>& getGT(const size_t i)     { return gt_; }
@@ -23,26 +23,24 @@ namespace opengm {
          size_t                        getNumberOfParameters()   { return 1; }
          size_t                        getNumberOfModels()       { return gms_.size(); } 
          
-         TestDatset();
-         TestDatset(size_t); 
+         TestDataset(size_t numModels=3); 
 
       private:
          std::vector<GM> gms_; 
          std::vector<LabelType> gt_; 
-         opengm::Parameters<ValueType,IndexType> modelParameter_;
+         ModelParameters modelParameters_;
       };
       
-       template<class GM>
-       TestDatset<GM>::TestDataset():TestDataset(3){};
+
 
       template<class GM>
-       TestDatset<GM>::TestDataset(size_t numModels)
-          : modelParameter_(ModelParameter(1));
+      TestDataset<GM>::TestDataset(size_t numModels)
+         : modelParameters_(ModelParameters(1))
       {
          LabelType numberOfLabels = 2;
          gt_.resize(64*64,0);
          for(size_t i=32*64; i<64*64; ++i){
-            ft[i] = 1;
+            gt_[i] = 1;
          }
          gms_.resize(numModels);
          for(size_t m=0; m<numModels; ++m){
@@ -53,30 +51,30 @@ namespace opengm {
                   // function
                   const size_t shape[] = {numberOfLabels};
                   ExplicitFunction<double> f(shape, shape + 1);
-                  ValueType val = double(gt_[y*64+x]) + (double) std::rand() / (double) (std::RAND_MAX) * 0.75 ;
-                  f(0) = sta::fabs(val-0);
-                  f(1) = sta::fabs(val-1);
-                  typename GM::FunctionIdentifier fid = gm.addFunction(f);
+                  ValueType val = (double)(gt_[y*64+x]) + (double)(std::rand()) / (double) (RAND_MAX) * 0.75 ;
+                  f(0) = std::fabs(val-0);
+                  f(1) = std::fabs(val-1);
+                  typename GM::FunctionIdentifier fid =  gms_[m].addFunction(f);
 
                   // factor
                   size_t variableIndices[] = {y*64+x};
-                  gm.addFactor(fid, variableIndices, variableIndices + 1);
+                  gms_[m].addFactor(fid, variableIndices, variableIndices + 1);
                }
             }
           
-            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(modelParameter_,2,std::vector<size_t>(1,0),std::vector<double>(1,1));
-            typename GM::FunctionIdentifier fid = gm.addFunction(f);      
+            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(modelParameters_,2,std::vector<size_t>(1,0),std::vector<double>(1,1));
+            typename GM::FunctionIdentifier fid = gms_[m].addFunction(f);      
             for(size_t y = 0; y < 64; ++y){ 
                for(size_t x = 0; x < 64; ++x) {
                   if(x + 1 < 64) { // (x, y) -- (x + 1, y)
                      size_t variableIndices[] = {y*64+x, y*64+x+1};
-                     sort(variableIndices, variableIndices + 2);
-                     gm.addFactor(fid, variableIndices, variableIndices + 2);
+                     //sort(variableIndices, variableIndices + 2);
+                     gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
                   }
                   if(y + 1 < 64) { // (x, y) -- (x, y + 1)
                      size_t variableIndices[] = {y*64+x, (y+1)*64+x};
-                     sort(variableIndices, variableIndices + 2);
-                     gm.addFactor(fid, variableIndices, variableIndices + 2);
+                     //sort(variableIndices, variableIndices + 2);
+                     gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
                   }
                }    
             }
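
A note on the main syntax repair in this file: the keyword order in the typedefs. The keyword typename qualifies a dependent name and follows typedef, it does not precede it. A minimal standalone sketch of the corrected idiom (the Grid type here is illustrative, not part of opengm):

    #include <iostream>

    template<class GM>
    class Dataset {
    public:
       // correct: typedef first, then typename for the dependent name;
       // "typename typedef GM::ValueType ValueType;" is rejected by the compiler
       typedef typename GM::ValueType ValueType;
    };

    struct Grid { typedef double ValueType; };  // illustrative stand-in for a graphical model type

    int main() {
       Dataset<Grid>::ValueType v = 0.5;  // resolves to double
       std::cout << v << std::endl;
    }
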
diff --git a/include/opengm/learning/loss/hammingloss.hxx b/include/opengm/learning/loss/hammingloss.hxx
index ec13eda..4e543bd 100644
--- a/include/opengm/learning/loss/hammingloss.hxx
+++ b/include/opengm/learning/loss/hammingloss.hxx
@@ -33,7 +33,7 @@ namespace opengm {
 
          for(typename GM::IndexType i=0; i<gm.numberOfVariables(); ++i){
            typename GM::LabelType numL = gm.numberOfLabels(i);
-            opengm::ExplicitFunction<GM::ValueType,GM::IndexType, GM::LabelType> f(&numL, &(numL)+1,1);
+            opengm::ExplicitFunction<typename GM::ValueType,typename GM::IndexType, typename GM::LabelType> f(&numL, &(numL)+1,1);
             f(*gt) = 0;
             ++gt;
            gm.addFactor(gm.addFunction(f), &i, &(i)+1);
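
The change in hammingloss.hxx only adds the typename keyword to the dependent template arguments of ExplicitFunction. For intuition on the loss itself: the scalar Hamming loss that RandomLearner::learn calls below simply counts label disagreements between a configuration and the ground truth. A standalone sketch of that iterator interface (it mirrors the call site in randomlearner.hxx; this is an illustrative class, not the opengm implementation, whose factor-building side is shown above):

    #include <cstddef>
    #include <iostream>

    // Counts positions where the inferred labeling disagrees with the
    // ground truth; same iterator signature as the call in learn().
    struct ScalarHammingLoss {
       template<class IT1, class IT2>
       double loss(IT1 labelBegin, IT1 labelEnd, IT2 gtBegin, IT2 gtEnd) const {
          double l = 0.0;
          for(; labelBegin != labelEnd && gtBegin != gtEnd; ++labelBegin, ++gtBegin)
             if(*labelBegin != *gtBegin)
                l += 1.0;
          return l;
       }
    };

    int main() {
       std::size_t c[] = {1, 1, 1, 1};
       std::size_t g[] = {1, 0, 0, 0};
       std::cout << ScalarHammingLoss().loss(c, c + 4, g, g + 4) << std::endl;  // prints 3
    }
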
diff --git a/include/opengm/learning/randomlearner.hxx b/include/opengm/learning/randomlearner.hxx
index 2019feb..7bb211c 100644
--- a/include/opengm/learning/randomlearner.hxx
+++ b/include/opengm/learning/randomlearner.hxx
@@ -7,11 +7,12 @@
 
 namespace opengm {
    namespace learning {
+
       template<class DATASET, class LOSS>
-      class RandomLearner<DATASET, LOSS>
+      class RandomLearner
       {
-      public:
-         typedef GMType; // This will be constructed as a combination of DATASET and LOSS (especially the functiontypelist
+      public: 
+         typedef opengm::GraphicalModel<double,opengm::Adder,typename opengm::meta::TypeListGenerator<opengm::ExplicitFunction<double>, opengm::functions::learnable::LPotts<double> >::type, opengm::DiscreteSpace<size_t, size_t> > GMType; // This will be constructed as a combination of DATASET and LOSS (especially the function type list)
 
 
          class Parameter{
@@ -26,11 +27,11 @@ namespace opengm {
          RandomLearner(DATASET&, Parameter& );
 
          template<class INF>
-         void learn(typename INF::Parameter para); 
+         void learn(typename INF::Parameter& para); 
          //template<class INF, class VISITOR>
         //void learn(typename INF::Parameter para, VISITOR vis);
 
-         const opengm::Parameters<ValueType,IndexType>& getModelParameters(){return modelParameters_;} 
+         const opengm::Parameters<double,size_t>& getModelParameters(){return modelParameters_;} 
         Parameter& getLearningParameters(){return para_;}
 
       private:
@@ -44,13 +45,43 @@ namespace opengm {
          : dataset_(ds), para_(p)
       {
          modelParameters_ = opengm::Parameters<double,size_t>(ds.numberOfParameters());
+         if(para_.parameterUpperbound_.size() != ds.numberOfParameters())
+            para_.parameterUpperbound_.resize(ds.numberOfParameters(),1000.0);
+         if(para_.parameterLowerbound_.size() != ds.numberOfParameters())
+            para_.parameterLowerbound_.resize(ds.numberOfParameters(),-1000.0);
       }
 
 
       template<class DATASET, class LOSS>
       template<class INF>
       void RandomLearner<DATASET, LOSS>::learn(typename INF::Parameter& para){
-         //todo
+         // generate model Parameters
+         std::vector< opengm::Parameters<double,size_t> > paras(para_.iterations_, opengm::Parameters<double,size_t>( dataset_.numberOfParameters()));
+         std::vector< double >                            loss(para_.iterations_,0);
+
+         for(size_t i=0;i<para_.iterations_;++i){
+            // the following is a very naive parameter selection and is not useful with more than one parameter
+            for(size_t p=0; p< dataset_.numberOfParameters(); ++p){
+               paras[i][p] = para_.parameterLowerbound_[p] + double(i)/double(para_.iterations_)*(para_.parameterUpperbound_[p]-para_.parameterLowerbound_[p]);
+            }
+         }
+         LOSS lossFunction;
+         size_t best = 0;
+         for(size_t i=0;i<para_.iterations_;++i){
+            opengm::Parameters<double,size_t> mp =  dataset_.getModelParameter();
+            mp = paras[i];
+            std::vector< std::vector<typename INF::LabelType> > confs( dataset_.numberOfModels() );
+            for(size_t m=0; m<dataset_.numberOfModels(); ++m){
+               INF inf( dataset_.getModel(m),para);
+               inf.infer();
+               inf.arg(confs[m]);
+               const std::vector<typename INF::LabelType>& gt =  dataset_.getGT(m);
+               loss[i] += lossFunction.loss(confs[m].begin(), confs[m].end(), gt.begin(), gt.end());
+            }
+            if(loss[i]<loss[best])
+               best=i;
+         }
+         modelParameters_ = paras[best];
       };
    }
 }
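
The filled-in learn() is a plain linear sweep rather than anything random: candidate parameter values are spaced evenly between the per-parameter lower and upper bounds, each candidate is scored by running inference on every model and summing the losses, and the lowest-scoring candidate wins. Stripped of the opengm types, the strategy looks like this (the quadratic evaluateLoss is a toy stand-in for the inference-plus-loss evaluation):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Toy stand-in for "run inference with this parameter on every
    // model and sum the Hamming losses".
    double evaluateLoss(double theta) {
       return (theta - 0.3) * (theta - 0.3);
    }

    int main() {
       const std::size_t iterations = 100;
       const double lower = -1000.0, upper = 1000.0;  // the defaults set in the constructor
       std::vector<double> candidate(iterations), loss(iterations);
       for(std::size_t i = 0; i < iterations; ++i) {
          // evenly spaced candidates, exactly as in learn()
          candidate[i] = lower + double(i) / double(iterations) * (upper - lower);
          loss[i] = evaluateLoss(candidate[i]);
       }
       std::size_t best = 0;
       for(std::size_t i = 1; i < iterations; ++i)
          if(loss[i] < loss[best])
             best = i;
       std::cout << "best parameter: " << candidate[best] << std::endl;
    }

As the in-code comment concedes, this does not generalize past one parameter: spacing candidates along a single line through the bound box explores a vanishing fraction of a multi-dimensional parameter space.
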
diff --git a/src/unittest/CMakeLists.txt b/src/unittest/CMakeLists.txt
index 33ab747..ed5681b 100644
--- a/src/unittest/CMakeLists.txt
+++ b/src/unittest/CMakeLists.txt
@@ -4,6 +4,9 @@ add_definitions(-DOPENGM_DEBUG)
 if(BUILD_TESTING)
 
 
+   add_executable(test-randomlearner test_randomlearner.cxx ${headers})
+   add_test(test-randomlearner ${CMAKE_CURRENT_BINARY_DIR}/test-randomlearner)
+
    add_executable(test-gm-learning-functions test_gm_learning_functions.cxx ${headers})
    add_test(test-gm-learning-functions ${CMAKE_CURRENT_BINARY_DIR}/test-gm-learning-functions)
 
diff --git a/src/unittest/test_randomlearner.cxx b/src/unittest/test_randomlearner.cxx
new file mode 100644
index 0000000..01f52bd
--- /dev/null
+++ b/src/unittest/test_randomlearner.cxx
@@ -0,0 +1,21 @@
+#include <vector>
+
+#include <opengm/functions/explicit_function.hxx>
+#include <opengm/unittests/test.hxx>
+#include <opengm/graphicalmodel/graphicalmodel.hxx>
+#include <opengm/operations/adder.hxx>
+#include <opengm/operations/minimizer.hxx>
+#include <opengm/inference/icm.hxx>
+#include <opengm/utilities/metaprogramming.hxx>
+
+#include <opengm/functions/learnable/lpotts.hxx>
+#include <opengm/learning/randomlearner.hxx>
+#include <opengm/learning/loss/hammingloss.hxx>
+#include <opengm/learning/dataset/testdataset.hxx>
+
+
+
+int main() {
+   std::cout << " Includes are fine :-) " << std::endl;
+
+}
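
So far the test only proves that the headers compile together. A rough sketch of the wiring it is presumably building toward, mirroring the GMType typedef in randomlearner.hxx (using ICM as the inference engine is an assumption, and the dataset/learner interfaces are still drifting in this commit, so this is illustrative only):

    // Would slot into main() above; uses only headers already included.
    typedef opengm::GraphicalModel<
       double, opengm::Adder,
       opengm::meta::TypeListGenerator<
          opengm::ExplicitFunction<double>,
          opengm::functions::learnable::LPotts<double> >::type,
       opengm::DiscreteSpace<size_t, size_t> > GMType;
    typedef opengm::datasets::TestDataset<GMType> DatasetType;
    typedef opengm::ICM<GMType, opengm::Minimizer> InfType;  // assumed inference engine
    // The learner would then be
    // opengm::learning::RandomLearner<DatasetType, SomeLossType>,
    // with SomeLossType the HammingLoss from the headers above.
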

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


