[opengm] 120/386: i am so lazy, i do not write proper commit messages

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:35:32 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 7834ad399089d688baa99dfbaa5ccdec0a553838
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Thu Dec 18 15:02:28 2014 +0100

    i am so lazy, i do not write proper commit messages
---
 src/interfaces/python/opengm/learning/__init__.py  | 35 +++++++++++++---------
 src/interfaces/python/opengm/learning/learning.cxx | 10 +++++--
 .../opengm/learning/pyStructMaxMarginLearner.cxx   |  3 ++
 3 files changed, 31 insertions(+), 17 deletions(-)

diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index b9f90f7..6e0c53f 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -2,6 +2,7 @@ from _learning import *
 import numpy
 import struct
 from opengm import index_type,value_type, label_type
+from opengm import configuration as opengmConfig
 
 DatasetWithHammingLoss.lossType = 'hamming'
 DatasetWithGeneralizedHammingLoss.lossType = 'generalized-hamming'
@@ -18,8 +19,10 @@ def _extendedLearn(self, infCls, parameter = None):
 
 GridSearch_HammingLoss.learn  =_extendedLearn
 GridSearch_GeneralizedHammingLoss.learn  =_extendedLearn
-StructMaxMargin_Bundle_HammingLoss.learn = _extendedLearn
-StructMaxMargin_Bundle_GeneralizedHammingLoss = _extendedLearn
+
+if opengmConfig.withCplex or opengmConfig.withGurobi :
+    StructMaxMargin_Bundle_HammingLoss.learn = _extendedLearn
+    StructMaxMargin_Bundle_GeneralizedHammingLoss.learn = _extendedLearn
         
 def createDataset(loss='hamming', numInstances=0):
     
@@ -60,20 +63,24 @@ def gridSearchLearner(dataset, lowerBounds, upperBounds, nTestPoints):
 
 
 def structMaxMarginLearner(dataset, regularizerWeight=1.0, minGap=1e-5, nSteps=0, optimizer='bundle'):
-    if optimizer != 'bundle':
-        raise RuntimeError("Optimizer type must be 'bundle' for now!")
 
-    if dataset.__class__.lossType == 'hamming':
-        learnerCls = StructMaxMargin_Bundle_HammingLoss
-        learnerParamCls = StructMaxMargin_Bundle_HammingLossParameter
-    elif dataset.__class__.lossType == 'generalized-hamming':
-        learnerCls = StructMaxMargin_Bundle_GeneralizedHammingLoss
-        learnerParamCls = StructMaxMargin_Bundle_GeneralizedHammingLossParameter
+    if opengmConfig.withCplex or opengmConfig.withGurobi :
+        if optimizer != 'bundle':
+            raise RuntimeError("Optimizer type must be 'bundle' for now!")
 
-    param = learnerParamCls(regularizerWeight, minGap, nSteps)
-    learner = learnerCls(dataset, param)
-    
-    return learner
+        if dataset.__class__.lossType == 'hamming':
+            learnerCls = StructMaxMargin_Bundle_HammingLoss
+            learnerParamCls = StructMaxMargin_Bundle_HammingLossParameter
+        elif dataset.__class__.lossType == 'generalized-hamming':
+            learnerCls = StructMaxMargin_Bundle_GeneralizedHammingLoss
+            learnerParamCls = StructMaxMargin_Bundle_GeneralizedHammingLossParameter
+
+        param = learnerParamCls(regularizerWeight, minGap, nSteps)
+        learner = learnerCls(dataset, param)
+        
+        return learner
+    else:
+        raise RuntimeError("this learner requires opengm built withCplex or withGurobi")
 
 
 def lPottsFunctions(nFunctions, numberOfLabels, features, weightIds):
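
A minimal usage sketch of the guarded factory (not part of the commit; the
empty dataset and the fallback branch are illustrative assumptions):

    from opengm import learning
    from opengm import configuration as opengmConfig

    # createDataset and structMaxMarginLearner are the factories touched
    # above; populating the dataset with models and ground truth is omitted.
    dataset = learning.createDataset(loss='hamming')

    if opengmConfig.withCplex or opengmConfig.withGurobi:
        learner = learning.structMaxMarginLearner(dataset,
                                                  regularizerWeight=1.0,
                                                  minGap=1e-5,
                                                  nSteps=0,
                                                  optimizer='bundle')
    else:
        # without an ILP backend, structMaxMarginLearner now raises a
        # RuntimeError, so skip it or fall back to gridSearchLearner
        learner = None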
diff --git a/src/interfaces/python/opengm/learning/learning.cxx b/src/interfaces/python/opengm/learning/learning.cxx
index bbaa07a..62af562 100644
--- a/src/interfaces/python/opengm/learning/learning.cxx
+++ b/src/interfaces/python/opengm/learning/learning.cxx
@@ -9,7 +9,10 @@
 #include <opengm/learning/loss/hammingloss.hxx>
 #include <opengm/learning/loss/generalized-hammingloss.hxx>
 #include <opengm/learning/loss/noloss.hxx>
+
+#if defined(WITH_CPLEX) || defined(WITH_GUROBI)
 #include <opengm/learning/bundle-optimizer.hxx>
+#endif
 
 namespace bp = boost::python;
 namespace op = opengm::python;
@@ -57,7 +60,8 @@ BOOST_PYTHON_MODULE_INIT(_learning) {
     opengm::export_grid_search_learner<op::GmAdderHammingLossDataset>("GridSearch_HammingLoss");
     opengm::export_grid_search_learner<op::GmAdderGeneralizedHammingLossDataset>("GridSearch_GeneralizedHammingLoss");
     
-    opengm::export_struct_max_margin_bundle_learner< op::GmAdderHammingLossDataset, ol::BundleOptimizer<op::GmValueType> >("StructMaxMargin_Bundle_HammingLoss");
-    opengm::export_struct_max_margin_bundle_learner< op::GmAdderGeneralizedHammingLossDataset, ol::BundleOptimizer<op::GmValueType> >("StructMaxMargin_Bundle_GeneralizedHammingLoss");
-
+    #if defined(WITH_CPLEX) || defined(WITH_GUROBI)
+        opengm::export_struct_max_margin_bundle_learner< op::GmAdderHammingLossDataset, ol::BundleOptimizer<op::GmValueType> >("StructMaxMargin_Bundle_HammingLoss");
+        opengm::export_struct_max_margin_bundle_learner< op::GmAdderGeneralizedHammingLossDataset, ol::BundleOptimizer<op::GmValueType> >("StructMaxMargin_Bundle_GeneralizedHammingLoss");
+    #endif
 }
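
When learning.cxx is built without WITH_CPLEX or WITH_GUROBI, the
StructMaxMargin_Bundle_* classes are simply not exported into the
_learning extension module, which is why the import-time assignments in
__init__.py above need the configuration guard. A small runtime probe
(an illustrative assumption, not part of the commit):

    import opengm.learning as learning

    # the bundle learner classes exist only if the bindings were built
    # with an ILP backend (CPLEX or Gurobi)
    bundleAvailable = hasattr(learning, 'StructMaxMargin_Bundle_HammingLoss')
    print('bundle learner exported:', bundleAvailable)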
diff --git a/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx b/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
index 51dab25..9e00396 100644
--- a/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
+++ b/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
@@ -1,3 +1,5 @@
+#if defined(WITH_CPLEX) || defined(WITH_GUROBI)
+
 #include <boost/python.hpp>
 #include <boost/python/module.hpp>
 #include <opengm/python/opengmpython.hxx>
@@ -73,3 +75,4 @@ namespace opengm{
 
 
 
+#endif
