[opengm] 199/386: better struct. perceptron

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:40 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 731812c3f2dbd8dde018a12b72a013cd10f71f14
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Sun Jan 4 18:42:11 2015 +0100

    better struct. perceptron
---
 fubar/real_example_2.py                            |  2 ++
 include/opengm/learning/structured_perceptron.hxx  |  4 +--
 .../python/opengm/learning/CMakeLists.txt          |  1 +
 src/interfaces/python/opengm/learning/__init__.py  | 32 ++++++++++++++++++++++
 src/interfaces/python/opengm/learning/learning.cxx | 11 ++++++--
 5 files changed, 45 insertions(+), 5 deletions(-)

diff --git a/fubar/real_example_2.py b/fubar/real_example_2.py
index ccad241..09b2a7b 100644
--- a/fubar/real_example_2.py
+++ b/fubar/real_example_2.py
@@ -132,6 +132,8 @@ nTestPoints  =numpy.ones(nWeights).astype('uint64')*5
 #learner = learning.structMaxMarginLearner(dataset, 1.0, 0.001, 0)
 #learner = learning.maxLikelihoodLearner(dataset)
 learner =  learning.structPerceptron(dataset, decayExponent=-0.001, learningMode='batch')
+#learner =  learning.subgradientSSVM(dataset, decayExponent=-0.001, learningMode='batch')
+
 
 learner.learn(infCls=opengm.inference.QpboExternal, 
               parameter=opengm.InfParam())
diff --git a/include/opengm/learning/structured_perceptron.hxx b/include/opengm/learning/structured_perceptron.hxx
index 0056727..619dac2 100644
--- a/include/opengm/learning/structured_perceptron.hxx
+++ b/include/opengm/learning/structured_perceptron.hxx
@@ -1,6 +1,6 @@
 #pragma once
-#ifndef OPENGM_GRIDSEARCH_LEARNER_HXX
-#define OPENGM_GRIDSEARCH_LEARNER_HXX
+#ifndef OPENGM_STRUCT_PERCEPTRON_LEARNER_HXX
+#define OPENGM_STRUCT_PERCEPTRON_LEARNER_HXX
 
 #include <vector>
 #include <opengm/inference/inference.hxx>
diff --git a/src/interfaces/python/opengm/learning/CMakeLists.txt b/src/interfaces/python/opengm/learning/CMakeLists.txt
index 8f0f489..45ab4ce 100644
--- a/src/interfaces/python/opengm/learning/CMakeLists.txt
+++ b/src/interfaces/python/opengm/learning/CMakeLists.txt
@@ -26,6 +26,7 @@ set(PY_OPENGM_CORE_SOURCES
             pyGridSearchLearner.cxx
             pyMaxLikelihoodLearner.cxx
             pyStructMaxMarginLearner.cxx
+            pySubgradientSSVM.cxx
             pyStructPerceptron.cxx
             )
 
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index 3a5be86..c0a918e 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -123,6 +123,38 @@ def structPerceptron(dataset, learningMode='online',eps=1e-5, maxIterations=1000
     learner = learnerCls(dataset, param)
     return learner
 
+
+def subgradientSSVM(dataset, learningMode='online',eps=1e-5, maxIterations=10000, stopLoss=0.0, decayExponent=0.0, decayT0=0.0):
+
+
+    if dataset.__class__.lossType == 'hamming':
+        learnerCls = SubgradientSSVM_HammingLoss
+        learnerParamCls = SubgradientSSVM_HammingLossParameter
+        learningModeEnum = SubgradientSSVM_HammingLossParameter_LearningMode
+    elif dataset.__class__.lossType == 'generalized-hamming':
+        learnerCls = SubgradientSSVM_GeneralizedHammingLoss
+        learnerParamCls = SubgradientSSVM_GeneralizedHammingLossParameter
+        learningModeEnum = SubgradientSSVM_GeneralizedHammingLossParameter_LearningMode
+
+    lm = None
+    if learningMode not in ['online','batch']:
+        raise RuntimeError("wrong learning mode, must be 'online' or 'batch' ")
+
+    if learningMode == 'online':
+        lm = learningModeEnum.online
+    if learningMode == 'batch':
+        lm = learningModeEnum.batch
+
+    param = learnerParamCls()
+    param.eps = float(eps)
+    param.maxIterations = int(maxIterations)
+    param.stopLoss = float(stopLoss)
+    param.decayExponent = float(decayExponent)
+    param.decayT0 = float(decayT0)
+    param.learningMode = lm
+    learner = learnerCls(dataset, param)
+    return learner
+
 def structMaxMarginLearner(dataset, regularizerWeight=1.0, minEps=1e-5, nSteps=0, epsStrategy='change', optimizer='bundle'):
 
     if opengmConfig.withCplex or opengmConfig.withGurobi :
diff --git a/src/interfaces/python/opengm/learning/learning.cxx b/src/interfaces/python/opengm/learning/learning.cxx
index 8697dc2..05c0821 100644
--- a/src/interfaces/python/opengm/learning/learning.cxx
+++ b/src/interfaces/python/opengm/learning/learning.cxx
@@ -15,6 +15,7 @@
 #include <opengm/learning/bundle-optimizer.hxx>
 #endif
 
+
 namespace bp = boost::python;
 namespace op = opengm::python;
 namespace ol = opengm::learning;
@@ -41,8 +42,13 @@ namespace opengm{
     template<class DATASET>
     void export_struct_perceptron_learner(const std::string & clsName);
 
+    template<class DATASET>
+    void export_subgradient_ssvm_learner(const std::string & clsName);
+
     template<class GM_ADDER,class GM_MULT>  
     void export_lfunction_generator();
+
+
 }
 
 
@@ -80,9 +86,8 @@ BOOST_PYTHON_MODULE_INIT(_learning) {
     opengm::export_struct_perceptron_learner<op::GmAdderGeneralizedHammingLossDataset>("StructPerceptron_GeneralizedHammingLoss");
     //opengm::export_struct_perceptron_learner<op::GmAdderGeneralizedHammingLossDataset>("StructPerceptron_FlexibleLoss");
     
-
-
-
+    opengm::export_subgradient_ssvm_learner<op::GmAdderHammingLossDataset>("SubgradientSSVM_HammingLoss");
+    opengm::export_subgradient_ssvm_learner<op::GmAdderGeneralizedHammingLossDataset>("SubgradientSSVM_GeneralizedHammingLoss");
 
     opengm::export_max_likelihood_learner<op::GmAdderHammingLossDataset>("MaxLikelihood_HammingLoss");
     opengm::export_max_likelihood_learner<op::GmAdderGeneralizedHammingLossDataset>("MaxLikelihood_GeneralizedHammingLoss");

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git



More information about the debian-science-commits mailing list