[opengm] 126/386: add: new test to test_learning

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:35:53 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit c44dd714e869191a60f91a75e2421ab0caa465a1
Author: Steffen-Wolf <Steffen.Wolf at stud.uni-heidelberg.de>
Date:   Thu Dec 18 15:44:20 2014 +0100

    add: new test to test_learning
---
 .../opengm/learning/dataset/editabledataset.hxx    |  25 +++++
 include/opengm/learning/dataset/testdatasets.hxx   |  80 ++++++++++++++
 include/opengm/learning/gradient-accumulator.hxx   |   2 +-
 include/opengm/learning/struct-max-margin.hxx      |   1 +
 src/unittest/learning/CMakeLists.txt               |   2 +-
 src/unittest/learning/test_learning.cxx            | 121 +++++++++++++++++++++
 6 files changed, 229 insertions(+), 2 deletions(-)

diff --git a/include/opengm/learning/dataset/editabledataset.hxx b/include/opengm/learning/dataset/editabledataset.hxx
index ed84663..e42ac2d 100644
--- a/include/opengm/learning/dataset/editabledataset.hxx
+++ b/include/opengm/learning/dataset/editabledataset.hxx
@@ -12,6 +12,19 @@
 namespace opengm {
    namespace datasets{
 
+    // template< typename Weights >
+    // struct LinkWeights{
+
+    //     Weights& w_;
+    //     LinkWeights(const Weights& w):w_(w){}
+
+    //     template<class FUNCTION>
+    //     void operator()(const FUNCTION & function)
+    //     {
+    //         function.setWeights(w_);
+    //     }
+    // };
+
      template<class GM, class LOSS>
       class EditableDataset : public Dataset<GM, LOSS>{
       public:
@@ -29,6 +42,7 @@ namespace opengm {
          EditableDataset(std::vector<GM>& gms, std::vector<GTVector >& gts, std::vector<LossParameterType>& lossParams);
 
          void setInstance(const size_t i, const GM& gm, const GTVector& gt, const LossParameterType& p=LossParameterType());
+         void setGT(const size_t i, const GTVector& gt);
          void pushBackInstance(const GM& gm, const GTVector& gt, const LossParameterType& p=LossParameterType());
          void setWeights(Weights& w);
       };
@@ -58,6 +72,13 @@ namespace opengm {
     }
 
     template<class GM, class LOSS>
+    void EditableDataset<GM, LOSS>::setGT(const size_t i, const GTVector& gt) {
+        OPENGM_CHECK_OP(i, <, this->gts_.size(),"");
+        this->gts_[i] = gt;
+        this->buildModelWithLoss(i);
+    }
+
+    template<class GM, class LOSS>
     void EditableDataset<GM, LOSS>::pushBackInstance(const GM& gm, const GTVector& gt, const LossParameterType& p) {
         this->gms_.push_back(gm);
         this->gts_.push_back(gt);
@@ -74,6 +95,10 @@ namespace opengm {
     template<class GM, class LOSS>
     void EditableDataset<GM, LOSS>::setWeights(Weights& w) {
         this->weights_ = w;
+        // LinkWeights<Weights> LinkFunctor(w);
+        // for(size_t i=0; i<this->gms_.size(); ++i){
+        //     (this->gms_[i])[0].callFunctor(LinkFunctor);
+        // }
     }
 
    } // namespace datasets
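
The new setGT(i, gt) entry point above replaces the ground truth of
instance i in place and immediately rebuilds the corresponding
loss-augmented model, so callers do not have to re-push the whole
(gm, gt, lossParam) triple. A minimal usage sketch, assuming the EDS
typedef introduced in test_learning.cxx further down:

    EDS dataset;                          // editable test dataset (see below)
    std::vector<size_t> gt(64*64, 0);     // a replacement labeling
    dataset.setGT(0, gt);                 // overwrites gts_[0] and calls
                                          // buildModelWithLoss(0)
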
diff --git a/include/opengm/learning/dataset/testdatasets.hxx b/include/opengm/learning/dataset/testdatasets.hxx
index fde3751..b278e64 100644
--- a/include/opengm/learning/dataset/testdatasets.hxx
+++ b/include/opengm/learning/dataset/testdatasets.hxx
@@ -6,6 +6,7 @@
 #include <cstdlib>
 
 #include <opengm/learning/dataset/dataset.hxx>
+#include <opengm/learning/dataset/editabledataset.hxx>
 #include <opengm/functions/learnable/lpotts.hxx>
 #include <opengm/functions/learnable/sum_of_experts.hxx>
 
@@ -69,6 +70,20 @@ namespace opengm {
          TestDatasetSimple(size_t numModels=1); 
       };
 
+      template<class GM, class LOSS>
+      class EditableTestDataset : public EditableDataset<GM,LOSS>{ 
+      public:
+         typedef GM                     GMType;
+         typedef GM                     GMWITHLOSS;
+         typedef LOSS                   LossType;
+         typedef typename GM::ValueType ValueType;
+         typedef typename GM::IndexType IndexType;
+         typedef typename GM::LabelType LabelType;
+         typedef opengm::learning::Weights<ValueType> Weights;
+
+         EditableTestDataset(size_t numModels=5); 
+      };
+
 //***********************************
 //** IMPL TestDataset 0
 //***********************************
@@ -280,6 +295,71 @@ namespace opengm {
          }
       };
  
+//***********************************
+//** IMPL TestDataset 2 (editable)
+//***********************************
+      template<class GM, class LOSS>
+      EditableTestDataset<GM,LOSS>::EditableTestDataset(size_t numModels)
+      { 
+         this->count_.resize(numModels,0);
+         this->weights_ = Weights(3);
+         LabelType numberOfLabels = 2;
+         this->gts_.resize(numModels,std::vector<size_t>(64*64,0));
+         for(size_t m=0;m<numModels;++m){
+            for(size_t i=32*64; i<64*64; ++i){
+               this->gts_[m][i] = 1;
+            }
+         }
+         this->gms_.resize(numModels);
+         this->gmsWithLoss_.resize(numModels);
+         for(size_t m=0; m<numModels; ++m){
+            std::srand(m);
+            for (int j = 0; j < 64*64; j++)
+               this->gms_[m].addVariable(2);
+            for(size_t y = 0; y < 64; ++y){ 
+               for(size_t x = 0; x < 64; ++x) {
+                  // function
+                  const size_t numExperts = 2;
+                  const std::vector<size_t> shape(1,numberOfLabels);
+                  std::vector<marray::Marray<ValueType> > feat(numExperts,marray::Marray<ValueType>(shape.begin(), shape.end()));
+                  ValueType val0 = (double)(this->gts_[m][y*64+x]) + (double)(std::rand()) / (double) (RAND_MAX) * 1.0 - 0.5 ;
+                  feat[0](0) = std::fabs(val0-0);
+                  feat[0](1) = std::fabs(val0-1); 
+                  ValueType val1 = (double)(this->gts_[m][y*64+x]) + (double)(std::rand()) / (double) (RAND_MAX) * 2.0 - 1.0 ;
+                  feat[1](0) = std::fabs(val1-0);
+                  feat[1](1) = std::fabs(val1-1);
+                  std::vector<size_t> wID(2);
+                  wID[0]=1;  wID[1]=2;
+                  opengm::functions::learnable::SumOfExperts<ValueType,IndexType,LabelType> f(shape,this->weights_, wID, feat);
+                  typename GM::FunctionIdentifier fid =  this->gms_[m].addFunction(f);
+
+                  // factor
+                  size_t variableIndices[] = {y*64+x};
+                  this->gms_[m].addFactor(fid, variableIndices, variableIndices + 1);
+               }
+            }
+          
+            opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(this->weights_,2,std::vector<size_t>(1,0),std::vector<ValueType>(1,1));
+            typename GM::FunctionIdentifier fid = this->gms_[m].addFunction(f);      
+            for(size_t y = 0; y < 64; ++y){ 
+               for(size_t x = 0; x < 64; ++x) {
+                  if(x + 1 < 64) { // (x, y) -- (x + 1, y)
+                     size_t variableIndices[] = {y*64+x, y*64+x+1};
+                     //sort(variableIndices, variableIndices + 2);
+                     this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
+                  }
+                  if(y + 1 < 64) { // (x, y) -- (x, y + 1)
+                     size_t variableIndices[] = {y*64+x, (y+1)*64+x};
+                     //sort(variableIndices, variableIndices + 2);
+                     this->gms_[m].addFactor(fid, variableIndices, variableIndices + 2);
+                  }
+               }    
+            }
+            this->buildModelWithLoss(m);
+         }
+      };
+
+
    }
 } // namespace opengm
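
For orientation, each EditableTestDataset model built above is a 64x64
binary grid: 64*64 = 4096 variables, one SumOfExperts unary factor per
pixel, and one shared LPotts function referenced by 2*64*63 = 8064
pairwise factors (horizontal plus vertical edges), i.e. 12160 factors
per model. A sanity check along those lines (counts derived from the
loops in the constructor; this assertion appears nowhere in the commit):

    EDS ds(1);
    const GM& gm = ds.getModel(0);
    OPENGM_TEST(gm.numberOfVariables() == 64*64);          // 4096
    OPENGM_TEST(gm.numberOfFactors() == 64*64 + 2*64*63);  // 12160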
 
diff --git a/include/opengm/learning/gradient-accumulator.hxx b/include/opengm/learning/gradient-accumulator.hxx
index 746718c..a2bf804 100644
--- a/include/opengm/learning/gradient-accumulator.hxx
+++ b/include/opengm/learning/gradient-accumulator.hxx
@@ -43,7 +43,7 @@ public:
 		_mode(mode) {
 
 		for (size_t i = 0; i < gradient.numberOfWeights(); i++)
-			gradient[i] = 0;
+			_gradient[i] = 0;
 	}
 
 	template <typename Iterator, typename FunctionType>
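
The one-character change above makes the constructor write to the
stored _gradient instead of the constructor argument gradient. Whether
the old line was a real bug depends on whether _gradient is a copy or
a reference to that argument, which the hunk does not show; if it is a
copy, the member was never zeroed. A generic illustration of the
member/parameter pitfall (names hypothetical, not the actual
GradientAccumulator signature):

    #include <cstddef>
    #include <vector>

    struct Accumulator {
        std::vector<double> _gradient;
        Accumulator(std::vector<double>& gradient)
            : _gradient(gradient) {           // member starts as a copy
            for (std::size_t i = 0; i < gradient.size(); i++)
                _gradient[i] = 0;             // zeroes the member;
                                              // "gradient[i] = 0" would zero
                                              // the caller's vector instead
        }
    };
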
diff --git a/include/opengm/learning/struct-max-margin.hxx b/include/opengm/learning/struct-max-margin.hxx
index 775732d..f810bed 100644
--- a/include/opengm/learning/struct-max-margin.hxx
+++ b/include/opengm/learning/struct-max-margin.hxx
@@ -98,6 +98,7 @@ private:
 				for (int i = 0; i < _dataset.getNumberOfModels(); i++) {
 
 					// get E(x,y) and F(x,y)
+					std::cout << "locking model " << i << " of " << _dataset.getNumberOfModels() <<  std::endl;
 					_dataset.lockModel(i);
 					const typename DatasetType::GMType&     gm  = _dataset.getModel(i);
 					const typename DatasetType::GMWITHLOSS& gml = _dataset.getModelWithLoss(i);
diff --git a/src/unittest/learning/CMakeLists.txt b/src/unittest/learning/CMakeLists.txt
index c07c40a..b1a4c63 100644
--- a/src/unittest/learning/CMakeLists.txt
+++ b/src/unittest/learning/CMakeLists.txt
@@ -31,7 +31,7 @@ if(BUILD_TESTING)
        endif()
      endif()
    endif()
-    
+   
    add_executable(test-generalized-hammingloss test_generalized_hammingloss.cxx ${headers})
    add_test(test-generalized-hammingloss ${CMAKE_CURRENT_BINARY_DIR}/test-generalized-hammingloss)
 endif()
diff --git a/src/unittest/learning/test_learning.cxx b/src/unittest/learning/test_learning.cxx
index 9a52ac2..2b803e4 100644
--- a/src/unittest/learning/test_learning.cxx
+++ b/src/unittest/learning/test_learning.cxx
@@ -22,6 +22,7 @@
 //#include <opengm/learning/dataset/testdataset.hxx>
 //#include <opengm/learning/dataset/testdataset2.hxx>
 #include <opengm/learning/dataset/testdatasets.hxx>
+#include <opengm/learning/dataset/editabledataset.hxx>
 
 
 //*************************************
@@ -37,6 +38,7 @@ typedef opengm::LPGurobi<GM,opengm::Minimizer> INF;
 #else
 typedef opengm::LPCplex<GM,opengm::Minimizer> INF;
 #endif
+typedef opengm::datasets::EditableTestDataset<GM,LOSS> EDS;
 typedef opengm::datasets::TestDataset1<GM,LOSS> DS1;
 typedef opengm::datasets::TestDataset2<GM,LOSS> DS2;
 typedef opengm::datasets::TestDatasetSimple<GM,LOSS> DSS;
@@ -106,5 +108,124 @@ int main() {
    }
 
 
+   {
+        // create editable dataset
+        EDS learningdataset;
+
+        INF::Parameter infPara;
+        infPara.integerConstraint_ = true;
+
+        std::vector< std::vector< LabelType > >GTSolutionVector;
+
+        std::cout << "inference with fixed, arbitrary weights to generate solution" << std::endl;
+
+        EDS::Weights learningWeightVector = learningdataset.getWeights();
+        EDS::Weights randomWeights(learningdataset.getNumberOfWeights());
+
+
+        // opengm::learning::StructMaxMargin<EDS>::Parameter para0;
+        // para0.optimizerParameter_.lambda = 1;
+        // opengm::learning::StructMaxMargin<EDS> learner0(learningdataset,para0);
+
+        // // // learn
+        // learner0.learn<INF>(infPara);
+
+        // std::srand(std::time(0));
+        for (int i = 0; i < learningWeightVector.numberOfWeights(); ++i)
+        {
+            randomWeights[i] = 1.0;
+
+            std::cout << randomWeights[i] << " --->  "  << learningWeightVector[i] << std::endl;
+            learningWeightVector.setWeight(i, randomWeights[i]);//double(std::rand()) / RAND_MAX * 100);
+        }
+
+        for (size_t modelIndex = 0; modelIndex < learningdataset.getNumberOfModels(); modelIndex++)
+        {
+
+            std::cout << "starting inference on GM " << modelIndex << std::endl;
+
+            // INF inference(learningdataset.getModel(modelIndex), infPara);
+            // inference.infer();
+            // std::vector< LabelType > sol1;
+            
+            // OPENGM_TEST(inference.arg(sol1) == opengm::NORMAL);
+
+            INF solver(learningdataset.getModel(modelIndex),infPara);
+            solver.infer();
+            std::vector< LabelType > sol1;
+            OPENGM_TEST(solver.arg(sol1) == opengm::NORMAL);
+
+
+            std::cout << "add solution "<< modelIndex <<" to new dataset" << std::endl;
+            learningdataset.setGT(modelIndex,sol1);
+
+            for (size_t j = 0; j < sol1.size(); j++)
+            {
+              std::cout << sol1[j];
+            }
+            std::cout << std::endl;
+            GTSolutionVector.push_back(sol1);
+        }
+
+
+        std::cout << "learn weights (without regularization)" << std::endl;
+
+        std::cout << "weight vector size " << learningdataset.getNumberOfWeights() << std::endl;
+        // Parameter
+        opengm::learning::StructMaxMargin<EDS>::Parameter para;
+        para.optimizerParameter_.lambda = 0.000000001;
+        opengm::learning::StructMaxMargin<EDS> learner(learningdataset,para);
+
+        // learn
+        learner.learn<INF>(infPara);
+
+        // get the result
+        const EDS::Weights &learnedParameters = learner.getWeights();
+        std::cout << learnedParameters.numberOfWeights() << std::endl;
+        std::cout << "set learnedParameters as new Weights: ";
+        for (size_t i = 0; i < learnedParameters.numberOfWeights(); ++i)
+        {
+            std::cout << learnedParameters[i] << " ";
+            learningWeightVector.setWeight(i, learnedParameters[i]);
+        }
+        std::cout << std::endl;
+
+        std::cout << "new weights: ";
+        for (int i = 0; i < learningWeightVector.numberOfWeights(); i++)
+        {
+            std::cout << learningWeightVector[i] << ", ";
+        }
+        std::cout << std::endl;
+
+
+        std::cout << "inference with new weights" << std::endl;
+        for (size_t modelIndex = 0; modelIndex < learningdataset.getNumberOfModels(); modelIndex++)
+        {
+            std::cout << "starting inference on GM " << modelIndex << " with learned weights" << std::endl;
+            INF solver(learningdataset.getModel(modelIndex),infPara);
+            solver.infer();
+            std::vector< LabelType > sol2;
+            OPENGM_TEST(solver.arg(sol2) == opengm::NORMAL);
+            for (size_t j = 0; j < sol2.size(); j++)
+            {
+               std::cout << "sol2["<<j<<"]:" << sol2[j] << "   GTSolutionVector["<<modelIndex<<"]["<<j<<"]:" << GTSolutionVector[modelIndex][j] << std::endl; 
+               OPENGM_TEST(sol2[j] == GTSolutionVector[modelIndex][j]);
+            }
+            // for (size_t j = 0; j < sol2.size(); j++)
+            // {
+            //    std::cout << sol2[j]; 
+            //    // OPENGM_TEST(sol2[j] == GTSolutionVector[modelIndex][j]);
+            // }
+            // std::cout << std::endl<< std::endl;
+            // for (size_t j = 0; j < sol2.size(); j++)
+            // {
+            //    std::cout <<  GTSolutionVector[modelIndex][j]; 
+            //    // OPENGM_TEST(sol2[j] == GTSolutionVector[modelIndex][j]);
+            // }
+            std::cout << "all " << sol2.size() << " solutions are correct" << std::endl;
+        }
+
+    }
+
 }
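
In outline, the new test block is a self-consistency check on the
learning pipeline (condensed from the code above; the printing and the
commented-out variants are omitted):

    EDS ds;                                      // editable grid models
    INF::Parameter infPara;
    infPara.integerConstraint_ = true;

    // (1) fix arbitrary weights and infer one labeling per model
    EDS::Weights w = ds.getWeights();
    for (size_t i = 0; i < w.numberOfWeights(); ++i)
        w.setWeight(i, 1.0);
    for (size_t m = 0; m < ds.getNumberOfModels(); ++m) {
        INF solver(ds.getModel(m), infPara);
        solver.infer();
        std::vector<LabelType> sol;
        solver.arg(sol);
        ds.setGT(m, sol);                        // (2) install as ground truth
    }

    // (3) learn with (almost) no regularization
    opengm::learning::StructMaxMargin<EDS>::Parameter para;
    para.optimizerParameter_.lambda = 1e-9;
    opengm::learning::StructMaxMargin<EDS> learner(ds, para);
    learner.learn<INF>(infPara);

    // (4) with the learned weights, inference must reproduce the
    //     synthesized ground truth exactly (checked per variable above)

Because the ground truth was itself produced by inference under some
weight setting (all weights 1.0), a zero-loss solution exists, so a
learner with negligible regularization should recover weights that
reproduce it; the final OPENGM_TEST assertions verify this label by
label.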
 

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


