[opengm] 54/386: add: test for max margin learning that reproduces

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:35:04 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 3f099a01a8e11531fe6ad1121e629d57fefaa0d6
Author: Steffen-Wolf <Steffen.Wolf at stud.uni-heidelberg.de>
Date:   Mon Dec 15 18:17:50 2014 +0100

    add: test for max margin learning that reproduces
---
 include/opengm/learning/dataset/dataset.hxx      |   2 +-
 include/opengm/learning/dataset/testdataset2.hxx |  10 +-
 src/unittest/test_learning.cxx                   | 126 +++++++++++++++++++++--
 3 files changed, 125 insertions(+), 13 deletions(-)

diff --git a/include/opengm/learning/dataset/dataset.hxx b/include/opengm/learning/dataset/dataset.hxx
index e6e9aac..4d07dbf 100644
--- a/include/opengm/learning/dataset/dataset.hxx
+++ b/include/opengm/learning/dataset/dataset.hxx
@@ -18,7 +18,7 @@ namespace opengm {
          typedef GM                     GMWITHLOSS;
          typedef typename GM::ValueType ValueType;
          typedef typename GM::IndexType IndexType;
-         typedef typename GM::LabelType LabelType; 
+         typedef typename GM::LabelType LabelType;
          typedef opengm::learning::Weights<ValueType> Weights;
 
          bool                          lockModel(const size_t i)         { ++count_[i]; }
diff --git a/include/opengm/learning/dataset/testdataset2.hxx b/include/opengm/learning/dataset/testdataset2.hxx
index 06ba46f..449216e 100644
--- a/include/opengm/learning/dataset/testdataset2.hxx
+++ b/include/opengm/learning/dataset/testdataset2.hxx
@@ -17,20 +17,20 @@ namespace opengm {
          typedef GM                     GMType;
          typedef typename GM::ValueType ValueType;
          typedef typename GM::IndexType IndexType;
-         typedef typename GM::LabelType LabelType; 
+         typedef typename GM::LabelType LabelType;
          typedef opengm::learning::Weights<ValueType> Weights;
 
          GM&                           getModel(const size_t i)  { return gms_[i]; }
          const std::vector<LabelType>& getGT(const size_t i)     { return gt_; }
          Weights&                      getWeights()              { return weights_; }
          size_t                        getNumberOfWeights()      { return 3; }
-         size_t                        getNumberOfModels()       { return gms_.size(); } 
+         size_t                        getNumberOfModels()       { return gms_.size(); }
          
-         TestDataset2(size_t numModels=4); 
+         TestDataset2(size_t numModels=4);
 
       private:
-         std::vector<GM> gms_; 
-         std::vector<LabelType> gt_; 
+         std::vector<GM> gms_;
+         std::vector<LabelType> gt_;
          Weights weights_;
       };
       
diff --git a/src/unittest/test_learning.cxx b/src/unittest/test_learning.cxx
index e45b2ab..a101aeb 100644
--- a/src/unittest/test_learning.cxx
+++ b/src/unittest/test_learning.cxx
@@ -29,11 +29,126 @@ typedef opengm::ICM<GM,opengm::Minimizer> INF;
 
 //*************************************
 
+template<class T>
+struct LearningTest{
+
+    typedef T                                                                  ValueType;
+    typedef OPENGM_TYPELIST_2(
+        opengm::ExplicitFunction<T>,
+        opengm::functions::learnable::LPotts<T>)                               FunctionTypeList;
+    typedef opengm::GraphicalModel<ValueType, opengm::Adder, FunctionTypeList> GraphicalModelType;
+    typedef opengm::datasets::TestDataset<GraphicalModelType>                  DatasetType;
+    typedef typename DatasetType::Weights                                      Weights;
+    typedef opengm::learning::HammingLoss                                      LossGeneratorType;
+    typedef opengm::Bruteforce<GraphicalModelType, opengm::Minimizer>          InferenceType;
+
+    void testStructMaxMargin()
+    {
+
+        // create a dataset
+        DatasetType dataset;
+
+        // create a learning algorithm
+        opengm::learning::StructMaxMargin<DatasetType, LossGeneratorType> structMaxMargin(dataset);
+
+        // train
+        typename InferenceType::Parameter infParams;
+        structMaxMargin.template learn<InferenceType>(infParams);
+
+        // get the result
+        const Weights &learntParameters = structMaxMargin.getWeights();
+        std::cout << learntParameters.numberOfWeights() << std::endl;
+        for (size_t i = 0; i < learntParameters.numberOfWeights(); ++i)
+            std::cout << learntParameters[i] << " ";
+        std::cout << std::endl;
+    }
+
+
+    void testStructMaxMargin_prediction()
+    {
+
+        // create a dataset
+        DatasetType dataset;
+
+        std::vector< std::vector<size_t> > GTSolutionVector;
+
+        std::cout << "inference with fixed, arbitrary weights to generate solution" << std::endl;
+
+        Weights weightVector = dataset.getWeights();
+        // std::srand(std::time(0));
+
+        for (size_t i = 0; i < weightVector.numberOfWeights(); i++)
+        {
+            weightVector.setWeight(i, double(std::rand()) / RAND_MAX * 100);
+            std::cout << weightVector[i] << std::endl;
+        }
+
+        for (size_t modelIndex = 0; modelIndex < dataset.getNumberOfModels(); modelIndex++)
+        {
+
+            std::cout << "starting inference on GM " << modelIndex << std::endl;
+            InferenceType solver(dataset.getModel(modelIndex));
+            solver.infer();
+            std::vector<size_t> sol1;
+            OPENGM_TEST(solver.arg(sol1) == opengm::NORMAL);
+            GTSolutionVector.push_back(sol1);
+            std::cout << "add solution to GM " << modelIndex << std::endl;
+            for (size_t j = 0; j < sol1.size(); j++)
+            {
+                // TODO: find way to set GT weights
+                // dataset.getGT(modelIndex)[j] = sol1[j]; does not work
+            }
+        }
+
+        std::cout << "learn weights (without regularization)" << std::endl;
+        // create a learning algorithm
+        opengm::learning::StructMaxMargin<DatasetType, LossGeneratorType> structMaxMargin(dataset);
+        // train
+        typename InferenceType::Parameter infParams;
+        structMaxMargin.template learn<InferenceType>(infParams);
+
+        // get the result
+        const Weights &learntParameters = structMaxMargin.getWeights();
+        std::cout << learntParameters.numberOfWeights() << std::endl;
+        std::cout << "learntParameters: ";
+        for (size_t i = 0; i < learntParameters.numberOfWeights(); ++i)
+        {
+            std::cout << learntParameters[i] << " ";
+            weightVector.setWeight(i, learntParameters[i]);
+        }
+        std::cout << std::endl;
+
+        std::cout << "inference with new weights" << std::endl;
+        for (size_t modelIndex = 0; modelIndex < dataset.getNumberOfModels(); modelIndex++)
+        {
+            std::cout << "starting inference on GM " << modelIndex << "with learned weights" << std::endl;
+            InferenceType solver(dataset.getModel(modelIndex));
+            solver.infer();
+            std::vector<size_t> sol2;
+            OPENGM_TEST(solver.arg(sol2) == opengm::NORMAL);
+            for (size_t j = 0; j < sol2.size(); j++)
+            {
+                OPENGM_TEST(sol2[j] == GTSolutionVector[modelIndex][j]);
+            }
+        }
+    }
+
+    void run()
+    {
+        this->testStructMaxMargin();
+        this->testStructMaxMargin_prediction();
+    }
+};
 
 int main() {
    std::cout << " Includes are fine :-) " << std::endl; 
-  
-   {
+
+    //  {
+    //  LearningTest<double> t;
+    //  t.run();
+    // }
+
+    {
       DS dataset;
       std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
       
@@ -43,9 +158,8 @@ int main() {
       
       INF::Parameter infPara;
       learner.learn<INF>(infPara);
-      
    }
-  
+
    {
       DS2 dataset;
       std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
@@ -57,6 +171,4 @@ int main() {
       INF::Parameter infPara;
       learner.learn<INF>(infPara);
    }
-
-
-}
+}
\ No newline at end of file

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git