[opengm] 207/386: new stuff

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:46 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit cc25b05cc32c6e8ae5530885c0a737d5062740af
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Thu Jan 8 15:41:23 2015 +0100

    new stuff
---
 fubar/brown_horse.py                               |   2 +-
 fubar/make_grid_potts_dset.py                      |   2 +-
 fubar/pascal_voc/convertPascalVOCNew.py            |   5 +-
 include/opengm/learning/loss/flexibleloss.hxx      |  87 +++++++++----
 include/opengm/python/opengmpython.hxx             |   3 +-
 src/interfaces/python/opengm/__init__.py           |   2 +-
 .../python/opengm/learning/CMakeLists.txt          |   2 +-
 src/interfaces/python/opengm/learning/__init__.py  | 138 +++++++++++----------
 src/interfaces/python/opengm/learning/learning.cxx |  30 ++---
 .../python/opengm/learning/pyDataset.cxx           |   6 +-
 .../python/opengm/learning/pyGridSearchLearner.cxx |   6 +-
 src/interfaces/python/opengm/learning/pyLoss.cxx   |  59 +++++----
 .../opengm/learning/pyMaxLikelihoodLearner.cxx     |   6 +-
 .../opengm/learning/pyStructMaxMarginLearner.cxx   |   4 +-
 .../python/opengm/learning/pyStructPerceptron.cxx  |   6 +-
 .../python/opengm/learning/pySubgradientSSVM.cxx   |   9 +-
 16 files changed, 203 insertions(+), 164 deletions(-)

diff --git a/fubar/brown_horse.py b/fubar/brown_horse.py
index 60dee6d..7d720d7 100644
--- a/fubar/brown_horse.py
+++ b/fubar/brown_horse.py
@@ -102,7 +102,7 @@ dataset,test_set = secondOrderImageDataset(imgs=imgs, gts=gts, numberOfLabels=2,
 
 
 learner =  learning.subgradientSSVM(dataset, learningRate=0.3, C=100, 
-                                    learningMode='batch',maxIterations=500)
+                                    learningMode='batch',maxIterations=40)
 
 #learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
 
diff --git a/fubar/make_grid_potts_dset.py b/fubar/make_grid_potts_dset.py
index cda360b..2ab93f0 100644
--- a/fubar/make_grid_potts_dset.py
+++ b/fubar/make_grid_potts_dset.py
@@ -48,7 +48,7 @@ def secondOrderImageDataset(imgs, gts, numberOfLabels, fUnary, fBinary, addConst
             tentative_test_set.append((img,gt))
 
 
-    dataset = learning.createDataset(numWeights=nWeights, loss='h')
+    dataset = learning.createDataset(numWeights=nWeights)
     weights = dataset.getWeights()
     uWeightIds = numpy.arange(nUnaryFeat ,dtype='uint64')
     bWeightIds = numpy.arange(start=nUnaryFeat,stop=nWeights,dtype='uint64')
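
With this change the loss type is no longer fixed when the dataset is
created; createDataset always builds a DatasetWithFlexibleLoss and the
loss is configured per instance instead. A minimal sketch of the new
call (the commented pushBackInstance signature is an assumption based on
the wrapper code further below):

    dataset = learning.createDataset(numWeights=nWeights)
    weights = dataset.getWeights()
    # the loss is now chosen per instance, e.g. when pushing instances:
    # dataset.pushBackInstance(gm, gt, learning.LossParameter(lossType='hamming'))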
diff --git a/fubar/pascal_voc/convertPascalVOCNew.py b/fubar/pascal_voc/convertPascalVOCNew.py
index 8c7315f..b85b121 100644
--- a/fubar/pascal_voc/convertPascalVOCNew.py
+++ b/fubar/pascal_voc/convertPascalVOCNew.py
@@ -54,11 +54,12 @@ for ii, (fn, ds_suffix) in enumerate(zip(fns, ds_suffixes)):
     print 'num_weights =', num_weights
     print 'num_instances =', len(X)
 
-    ogm_dss[ii] = learning.createDataset(num_weights, numInstances=len(X), loss="generalized-hamming")
+    ogm_dss[ii] = learning.createDataset(num_weights, numInstances=len(X))
     #ogm_ds = ogm_dss[ii]
     ww[ii] = ogm_dss[ii].getWeights()
 
     for idx, (x, y) in enumerate(zip(X, Y)):
+        print idx
         y[y==-1]=0  # FIXME: introduce a void label, so long: make the void label background 
         unary_feats, edges, edge_feats = x
         num_vars = unary_feats.shape[0]
@@ -67,7 +68,7 @@ for ii, (fn, ds_suffix) in enumerate(zip(fns, ds_suffixes)):
         
         gm = opengm.gm(states, operator='adder')
 
-        lossParam = learning.GeneralizedHammingLossParameter()
+        lossParam = learning.LossParameter(lossType='hamming', labelMult=label_weights)
         lossParam.setLabelLossMultiplier(label_weights)
 
         # add unary factors
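
The unified LossParameter above replaces the per-loss parameter classes
such as GeneralizedHammingLossParameter. The two spellings are
equivalent (a sketch; label_weights is assumed to be a 1-D numpy array
of per-label multipliers, as in this script):

    lossParam = learning.LossParameter(lossType='hamming', labelMult=label_weights)

    # same effect, setting the multiplier explicitly:
    lossParam = learning.LossParameter(lossType='hamming')
    lossParam.setLabelLossMultiplier(label_weights)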
diff --git a/include/opengm/learning/loss/flexibleloss.hxx b/include/opengm/learning/loss/flexibleloss.hxx
index 748fa07..6c3fa9f 100644
--- a/include/opengm/learning/loss/flexibleloss.hxx
+++ b/include/opengm/learning/loss/flexibleloss.hxx
@@ -20,9 +20,6 @@ public:
     class Parameter{
     public:
 
-        Parameter{
-            lambdaWeight = 1.0;
-        }
         enum LossType{
             Hamming = 0 ,
             L1 = 1,
@@ -31,6 +28,12 @@ public:
             ConfMat = 4
         };
 
+        Parameter(){
+            lossType_ = Hamming;
+            lambdaWeight = 1.0;
+        }
+
+
         bool operator==(const FlexibleLoss & other) const{
             throw opengm::RuntimeError("do not call me");
         }
@@ -42,7 +45,7 @@ public:
         }
         double getNodeLossMultiplier(const size_t i) const;
         double getLabelLossMultiplier(const size_t i) const;
-
+        double getFactorLossMultiplier(const size_t i) const;
         double getLabelConfMatMultiplier(const size_t l, const size_t lgt)const;
         /**
          * serializes the parameter object to the given hdf5 group handle;
@@ -53,13 +56,14 @@ public:
         void load(const hid_t& groupHandle);
         static std::size_t getLossId() { return lossId_; }
 
+        LossType lossType_;
+        double lambdaWeight;
 
         std::vector<double>     nodeLossMultiplier_;
         std::vector<double>     labelLossMultiplier_;
         std::vector<double>     factorMultipier_;
         marray::Marray<double>  confMat_;
-        LossType lossType_;
-        double lambdaWeight;
+        
 
 
     private:
@@ -88,6 +92,13 @@ inline double FlexibleLoss::Parameter::getNodeLossMultiplier(const size_t i) con
     return this->nodeLossMultiplier_[i];
 }
 
+inline double FlexibleLoss::Parameter::getFactorLossMultiplier(const size_t i) const {
+    if(i >= this->factorMultipier_.size()) {
+        return 1.;
+    }
+    return this->factorMultipier_[i];
+}
+
 inline double FlexibleLoss::Parameter::getLabelLossMultiplier(const size_t i) const {
     if(i >= this->labelLossMultiplier_.size()) {
         return 1.;
@@ -95,7 +106,7 @@ inline double FlexibleLoss::Parameter::getLabelLossMultiplier(const size_t i) co
     return this->labelLossMultiplier_[i];
 }
 
-double FlexibleLoss::Parameter::getLabelConfMatMultiplier(const size_t l, const size_t lgt)const{
+inline double FlexibleLoss::Parameter::getLabelConfMatMultiplier(const size_t l, const size_t lgt)const{
     if(l<confMat_.shape(0) && lgt<confMat_.shape(1)){
         return confMat_(l, lgt);
     }
@@ -143,6 +154,23 @@ double FlexibleLoss::loss(const GM & gm, IT1 labelBegin, const IT1 labelEnd, IT2
             }
         }
     }
+    else if(param_.lossType_ == Parameter::L1 || param_.lossType_ == Parameter::L2){
+        const size_t norm = param_.lossType_ == Parameter::L1 ? 1 : 2;
+        for(; labelBegin!= labelEnd; ++labelBegin, ++GTBegin, ++nodeIndex){
+            if(*labelBegin != *GTBegin){            
+                loss += param_.getNodeLossMultiplier(nodeIndex) * std::pow(std::abs(*GTBegin - *labelBegin), norm) * param_.lambdaWeight;
+            }
+        }
+    }
+    else if(param_.lossType_ == Parameter::ConfMat){
+        throw opengm::RuntimeError("ConfMat Loss is not yet implemented");
+    }
+    else if(param_.lossType_ == Parameter::Partition){
+        throw opengm::RuntimeError("Partition / Multicut Loss is not yet implemented");
+    }
+    else{
+        throw opengm::RuntimeError("INTERNAL ERROR: unknown Loss Type");
+    }
     return loss;
 }
 
@@ -153,7 +181,7 @@ void FlexibleLoss::addLoss(GM& gm, IT gt) const
     typedef typename  GM::IndexType IndexType;
     typedef typename  GM::ValueType ValueType;
     typedef opengm::ExplicitFunction<ValueType, IndexType,  LabelType>  ExplicitFunction;
-
+    typedef opengm::PottsFunction<ValueType, IndexType,  LabelType>  Potts;
 
     if(param_.lossType_ == Parameter::Hamming){
         for(IndexType i=0; i<gm.numberOfVariables(); ++i){
@@ -168,7 +196,7 @@ void FlexibleLoss::addLoss(GM& gm, IT gt) const
         }
     }
     else if(param_.lossType_ == Parameter::L1 || param_.lossType_ == Parameter::L2){
-        const size_t norm == aram_.lossType_ == Parameter::L1 ? 1 : 2;
+        const size_t norm = param_.lossType_ == Parameter::L1 ? 1 : 2;
         for(IndexType i=0; i<gm.numberOfVariables(); ++i){
             LabelType numL = gm.numberOfLabels(i);
             ExplicitFunction f(&numL, &numL+1, 0);
@@ -181,21 +209,36 @@ void FlexibleLoss::addLoss(GM& gm, IT gt) const
             gm.addFactor(gm.addFunction(f), &i, &i+1);     
         }
     }
-    else if(param_.lossType_ == Parameter::L1 || param_.lossType_ == Parameter::L2){
-        const size_t norm == aram_.lossType_ == Parameter::L1 ? 1 : 2;
-        for(IndexType i=0; i<gm.numberOfVariables(); ++i){
-            LabelType numL = gm.numberOfLabels(i);
-            ExplicitFunction f(&numL, &numL+1, 0);
-            const LabelType gtL = *gt;
-            for(LabelType l = 0; l < numL; ++l){
-                f(l) = - param_.getNodeLossMultiplier(i) * param_.getLabelConfMatMultiplier(l, gtL);
-            }
-            f(*gt) = 0;
-            ++gt;
-            gm.addFactor(gm.addFunction(f), &i, &i+1);     
-        }
+    else if(param_.lossType_ == Parameter::ConfMat){
+        throw opengm::RuntimeError("ConfMat Loss is not yet implemented");
     }
     else if(param_.lossType_ == Parameter::Partition){
+
+        const size_t nFactorsInit = gm.numberOfFactors();
+
+        for(size_t fi=0; fi<nFactorsInit; ++fi){
+            const size_t nVar = gm[fi].numberOfVariables();
+            OPENGM_CHECK_OP(nVar,==,2,"Partition / Multicut Loss is only allowed if the graphical model has only"
+                                      " second order factors (this might be changed in the future)");
+
+            const IndexType vis[2] = { gm[fi].variableIndex(0), gm[fi].variableIndex(1)};
+            const LabelType nl[2]  = { gm.numberOfLabels(vis[0]), gm.numberOfLabels(vis[1])};
+
+            const double facVal = param_.getFactorLossMultiplier(fi);
+
+            // in the gt they are in the same cluster
+            if(gt[vis[0]] == gt[vis[1]]){
+                Potts pf(nl[0],nl[1], 0.0, -1.0*facVal);
+                gm.addFactor(gm.addFunction(pf), vis,vis+2);
+            }
+            // in the gt they are in different clusters
+            else{
+                Potts pf(nl[0],nl[1], -1.0*facVal, 0.0);
+                gm.addFactor(gm.addFunction(pf), vis,vis+2);
+            }
+        }
+
+
         throw opengm::RuntimeError("Partition / Multicut Loss is not yet implemented");
     }
     else{
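
For orientation: the new L1/L2 branch of FlexibleLoss::loss charges each
mismatched node nodeMultiplier * |gt - label|^p * lambdaWeight, with
p = 1 for L1 and p = 2 for L2. An equivalent Python sketch (out-of-range
multipliers default to 1.0, mirroring getNodeLossMultiplier):

    def l_norm_loss(labels, gt, p, node_mult=(), lambda_weight=1.0):
        loss = 0.0
        for i, (l, g) in enumerate(zip(labels, gt)):
            if l != g:
                m = node_mult[i] if i < len(node_mult) else 1.0
                loss += m * abs(g - l) ** p * lambda_weight
        return loss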
diff --git a/include/opengm/python/opengmpython.hxx b/include/opengm/python/opengmpython.hxx
index de21b7a..e579e6d 100644
--- a/include/opengm/python/opengmpython.hxx
+++ b/include/opengm/python/opengmpython.hxx
@@ -30,6 +30,7 @@
 #include <opengm/learning/loss/hammingloss.hxx>
 #include <opengm/learning/loss/generalized-hammingloss.hxx>
 #include <opengm/learning/loss/noloss.hxx>
+#include <opengm/learning/loss/flexibleloss.hxx>
 
 #include <algorithm>
 #include <vector>
@@ -118,7 +119,7 @@ namespace python{
 
    typedef opengm::datasets::EditableDataset<GmAdder, opengm::learning::HammingLoss > GmAdderHammingLossDataset;
    typedef opengm::datasets::EditableDataset<GmAdder, opengm::learning::GeneralizedHammingLoss > GmAdderGeneralizedHammingLossDataset;
-
+   typedef opengm::datasets::EditableDataset<GmAdder, opengm::learning::FlexibleLoss > GmAdderFlexibleLossDataset;
 
    typedef GmAdder::FactorType FactorGmAdder;
    typedef FactorGmAdder GmAdderFactor;
diff --git a/src/interfaces/python/opengm/__init__.py b/src/interfaces/python/opengm/__init__.py
index a79f6aa..bca9b8c 100644
--- a/src/interfaces/python/opengm/__init__.py
+++ b/src/interfaces/python/opengm/__init__.py
@@ -14,7 +14,7 @@ from _inference_interface_generator import _inject_interface , InferenceBase
 import inference
 import hdf5
 import benchmark
-
+from _to_native_converter import to_native_boost_python_enum_converter
 # initialize solver/ inference dictionaries
 _solverDicts=[
    (inference.adder.minimizer.solver.__dict__ ,     'adder',       'minimizer' ),
diff --git a/src/interfaces/python/opengm/learning/CMakeLists.txt b/src/interfaces/python/opengm/learning/CMakeLists.txt
index 45ab4ce..0b34301 100644
--- a/src/interfaces/python/opengm/learning/CMakeLists.txt
+++ b/src/interfaces/python/opengm/learning/CMakeLists.txt
@@ -24,7 +24,7 @@ set(PY_OPENGM_CORE_SOURCES
             pyDataset.cxx
             pyLoss.cxx
             pyGridSearchLearner.cxx
-            pyMaxLikelihoodLearner.cxx
+            #pyMaxLikelihoodLearner.cxx
             pyStructMaxMarginLearner.cxx
             pySubgradientSSVM.cxx
             pyStructPerceptron.cxx
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index 6114e65..9224970 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -4,8 +4,7 @@ import numpy
 import struct
 from opengm import index_type,value_type, label_type
 from opengm import configuration as opengmConfig, LUnaryFunction
-DatasetWithHammingLoss.lossType = 'hamming'
-DatasetWithGeneralizedHammingLoss.lossType = 'generalized-hamming'
+from opengm import to_native_boost_python_enum_converter
 
 
 
@@ -31,42 +30,59 @@ def _extendedGetTotalLoss(self, infCls, parameter = None):
     cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
     return self._getTotalLoss(cppParam)
 
-GridSearch_HammingLoss.learn  =_extendedLearn
-GridSearch_GeneralizedHammingLoss.learn  =_extendedLearn
 
-MaxLikelihood_HammingLoss.learn  =_extendedLearn
-MaxLikelihood_GeneralizedHammingLoss.learn  =_extendedLearn
 
-StructPerceptron_HammingLoss.learn  =_extendedLearn
-StructPerceptron_GeneralizedHammingLoss.learn  =_extendedLearn
 
-SubgradientSSVM_HammingLoss.learn  =_extendedLearn
-SubgradientSSVM_GeneralizedHammingLoss.learn  =_extendedLearn
+
+
+DatasetWithFlexibleLoss.lossType = 'flexible'
+
+
+class LossParameter(FlexibleLossParameter):
+    def __init__(self, lossType, labelMult=None, nodeMult=None, factorMult=None):
+        super(LossParameter, self).__init__()
+
+        self.lossType = to_native_boost_python_enum_converter(lossType,self.lossType.__class__)
+
+        if labelMult is not None:
+            assert self.lossType == LossType.hamming
+            self.setLabelLossMultiplier(labelMult)
+        if nodeMult is not None:
+            assert self.lossType != LossType.partition
+            self.setNodeLossMultiplier(nodeMult)
+        if factorMult is not None:
+            assert self.lossType == LossType.partition
+            self.setFactorLossMultiplier(factorMult)
+
+
+
+GridSearch_FlexibleLoss.learn  =_extendedLearn
+#MaxLikelihood_FlexibleLoss.learn  =_extendedLearn
+StructPerceptron_FlexibleLoss.learn  =_extendedLearn
+SubgradientSSVM_FlexibleLoss.learn  =_extendedLearn
+
 
 
 if opengmConfig.withCplex or opengmConfig.withGurobi :
-    StructMaxMargin_Bundle_HammingLoss.learn = _extendedLearn
-    StructMaxMargin_Bundle_GeneralizedHammingLoss.learn = _extendedLearn
+    StructMaxMargin_Bundle_FlexibleLoss.learn = _extendedLearn
+
+DatasetWithFlexibleLoss.getLoss = _extendedGetLoss
+DatasetWithFlexibleLoss.getTotalLoss = _extendedGetTotalLoss
 
-DatasetWithHammingLoss.getLoss = _extendedGetLoss
-DatasetWithHammingLoss.getTotalLoss = _extendedGetTotalLoss
-DatasetWithGeneralizedHammingLoss.getLoss = _extendedGetLoss
-DatasetWithGeneralizedHammingLoss.getTotalLoss = _extendedGetTotalLoss
 
-def createDataset(numWeights, loss='hamming', numInstances=0):
+def createDataset(numWeights,  numInstances=0):
     weightVals = numpy.ones(numWeights)
     weights = Weights(weightVals)
 
-    if loss not in ['hamming','h','gh','generalized-hamming']:
-        raise RuntimeError("loss must be 'hamming' /'h' or 'generalized-hamming'/'gh' ")    
-
-    if loss in ['hamming','h']:
-        dataset = DatasetWithHammingLoss(int(numInstances))
-    elif loss in ['generalized-hamming','gh']:
-        dataset = DatasetWithGeneralizedHammingLoss(int(numInstances))
-    else:
-        raise RuntimeError("loss must be 'hamming' /'h' or 'generalized-hamming'/'gh' ")   
-
+    # if loss not in ['hamming','h','gh','generalized-hamming']:
+    #     raise RuntimeError("loss must be 'hamming' /'h' or 'generalized-hamming'/'gh' ")    
+    # if loss in ['hamming','h']:
+    #     dataset = DatasetWithHammingLoss(int(numInstances))
+    # elif loss in ['generalized-hamming','gh']:
+    #     dataset = DatasetWithGeneralizedHammingLoss(int(numInstances))
+    # else:
+    #     raise RuntimeError("loss must be 'hamming' /'h' or 'generalized-hamming'/'gh' ")   
+    dataset = DatasetWithFlexibleLoss(numInstances)
     dataset.setWeights(weights)
     return dataset
 
@@ -74,13 +90,9 @@ def createDataset(numWeights, loss='hamming', numInstances=0):
 
 
 def gridSearchLearner(dataset, lowerBounds, upperBounds, nTestPoints):
-
-    if dataset.__class__.lossType == 'hamming':
-        learnerCls = GridSearch_HammingLoss
-        learnerParamCls = GridSearch_HammingLossParameter
-    elif dataset.__class__.lossType == 'generalized-hamming':
-        learnerCls = GridSearch_GeneralizedHammingLoss
-        learnerParamCls = GridSearch_GeneralizedHammingLossParameter
+    assert dataset.__class__.lossType == 'flexible'
+    learnerCls = GridSearch_FlexibleLoss
+    learnerParamCls = GridSearch_FlexibleLossParameter
 
     nr = numpy.require 
     sizeT_type = 'uint64'
@@ -97,15 +109,10 @@ def gridSearchLearner(dataset, lowerBounds, upperBounds, nTestPoints):
 
 def structPerceptron(dataset, learningMode='online',eps=1e-5, maxIterations=10000, stopLoss=0.0, decayExponent=0.0, decayT0=0.0):
 
-
-    if dataset.__class__.lossType == 'hamming':
-        learnerCls = StructPerceptron_HammingLoss
-        learnerParamCls = StructPerceptron_HammingLossParameter
-        learningModeEnum = StructPerceptron_HammingLossParameter_LearningMode
-    elif dataset.__class__.lossType == 'generalized-hamming':
-        learnerCls = StructPerceptron_GeneralizedHammingLossParameter
-        learnerParamCls = StructPerceptron_GeneralizedHammingLoss
-        learningModeEnum = StructPerceptron_GeneralizedHammingLossParameter_LearningMode
+    assert dataset.__class__.lossType == 'flexible'
+    learnerCls = StructPerceptron_FlexibleLoss
+    learnerParamCls = StructPerceptron_FlexibleLossParameter
+    learningModeEnum = StructPerceptron_FlexibleLossParameter_LearningMode
 
     lm = None
     if learningMode not in ['online','batch']:
@@ -129,15 +136,10 @@ def structPerceptron(dataset, learningMode='online',eps=1e-5, maxIterations=1000
 
 def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000, stopLoss=0.0, learningRate=1.0, C=100.0):
 
-
-    if dataset.__class__.lossType == 'hamming':
-        learnerCls = SubgradientSSVM_HammingLoss
-        learnerParamCls = SubgradientSSVM_HammingLossParameter
-        learningModeEnum = SubgradientSSVM_HammingLossParameter_LearningMode
-    elif dataset.__class__.lossType == 'generalized-hamming':
-        learnerCls = SubgradientSSVM_GeneralizedHammingLossParameter
-        learnerParamCls = SubgradientSSVM_GeneralizedHammingLoss
-        learningModeEnum = SubgradientSSVM_GeneralizedHammingLossParameter_LearningMode
+    assert dataset.__class__.lossType == 'flexible'
+    learnerCls = SubgradientSSVM_FlexibleLoss
+    learnerParamCls = SubgradientSSVM_FlexibleLossParameter
+    learningModeEnum = SubgradientSSVM_FlexibleLossParameter_LearningMode
 
     lm = None
     if learningMode not in ['online','batch']:
@@ -164,12 +166,10 @@ def structMaxMarginLearner(dataset, regularizerWeight=1.0, minEps=1e-5, nSteps=0
         if optimizer != 'bundle':
             raise RuntimeError("Optimizer type must be 'bundle' for now!")
 
-        if dataset.__class__.lossType == 'hamming':
-            learnerCls = StructMaxMargin_Bundle_HammingLoss
-            learnerParamCls = StructMaxMargin_Bundle_HammingLossParameter
-        elif dataset.__class__.lossType == 'generalized-hamming':
-            learnerCls = StructMaxMargin_Bundle_GeneralizedHammingLoss
-            learnerParamCls = StructMaxMargin_Bundle_GeneralizedHammingLossParameter
+
+        assert dataset.__class__.lossType == 'flexible'
+        learnerCls = StructMaxMargin_Bundle_FlexibleLoss
+        learnerParamCls = StructMaxMargin_Bundle_FlexibleLossParameter
 
         epsFromGap = False
         if epsStrategy == 'gap':
@@ -184,18 +184,20 @@ def structMaxMarginLearner(dataset, regularizerWeight=1.0, minEps=1e-5, nSteps=0
     else:
         raise RuntimeError("this learner needs widthCplex or withGurobi")
 
-def maxLikelihoodLearner(dataset):
-    if dataset.__class__.lossType == 'hamming':
-        learnerCls = MaxLikelihood_HammingLoss
-        learnerParamCls = MaxLikelihood_HammingLossParameter
-    elif dataset.__class__.lossType == 'generalized-hamming':
-        learnerCls = MaxLikelihood_GeneralizedHammingLoss
-        learnerParamCls = MaxLikelihood_GeneralizedHammingLossParameter
 
-    param = learnerParamCls()
-    learner = learnerCls(dataset, param)
+# def maxLikelihoodLearner(dataset):
+#     raise RuntimeError("not yet implemented / wrapped fully")
+#     if dataset.__class__.lossType == 'hamming':
+#         learnerCls = MaxLikelihood_HammingLoss
+#         learnerParamCls = MaxLikelihood_HammingLossParameter
+#     elif dataset.__class__.lossType == 'generalized-hamming':
+#         learnerCls = MaxLikelihood_GeneralizedHammingLoss
+#         learnerParamCls = MaxLikelihood_GeneralizedHammingLossParameter
+
+#     param = learnerParamCls()
+#     learner = learnerCls(dataset, param)
         
-    return learner
+#     return learner
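
A quick end-to-end sketch of the learner setup against the new
flexible-loss dataset (the inference class and parameter passed to
learn() are assumptions here -- any wrapped opengm inference class
should work the same way):

    import opengm
    from opengm import learning

    dataset = learning.createDataset(numWeights=nWeights)  # nWeights as above
    learner = learning.subgradientSSVM(dataset, learningRate=0.3, C=100,
                                       learningMode='batch', maxIterations=40)
    learner.learn(infCls=opengm.inference.Icm, parameter=opengm.InfParam())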
 
 
 
diff --git a/src/interfaces/python/opengm/learning/learning.cxx b/src/interfaces/python/opengm/learning/learning.cxx
index 000a869..ad80980 100644
--- a/src/interfaces/python/opengm/learning/learning.cxx
+++ b/src/interfaces/python/opengm/learning/learning.cxx
@@ -9,7 +9,7 @@
 #include <opengm/learning/loss/hammingloss.hxx>
 #include <opengm/learning/loss/generalized-hammingloss.hxx>
 #include <opengm/learning/loss/noloss.hxx>
-//#include <opengm/learning/loss/flexibleloss.hxx>
+#include <opengm/learning/loss/flexibleloss.hxx>
 
 #if defined(WITH_CPLEX) || defined(WITH_GUROBI)
 #include <opengm/learning/bundle-optimizer.hxx>
@@ -36,8 +36,8 @@ namespace opengm{
     template<class DATASET, class OPTIMIZER>
     void export_struct_max_margin_bundle_learner(const std::string & clsName);
 
-    template<class DATASET>
-    void export_max_likelihood_learner(const std::string & clsName);
+    //template<class DATASET>
+    //void export_max_likelihood_learner(const std::string & clsName);
 
     template<class DATASET>
     void export_struct_perceptron_learner(const std::string & clsName);
@@ -71,29 +71,19 @@ BOOST_PYTHON_MODULE_INIT(_learning) {
     opengm::export_loss<op::GmAdder>();
 
     // templated datasets
-    opengm::export_dataset<op::GmAdder, ol::HammingLoss >("DatasetWithHammingLoss");
-    opengm::export_dataset<op::GmAdder, ol::GeneralizedHammingLoss >("DatasetWithGeneralizedHammingLoss");
-    //opengm::export_dataset<op::GmAdder, ol::FlexibleLoss >("DatasetWithFlexibleLoss");
+    opengm::export_dataset<op::GmAdder, ol::FlexibleLoss >("DatasetWithFlexibleLoss");
 
 
 
-    opengm::export_grid_search_learner<op::GmAdderHammingLossDataset>("GridSearch_HammingLoss");
-    opengm::export_grid_search_learner<op::GmAdderGeneralizedHammingLossDataset>("GridSearch_GeneralizedHammingLoss");
-    //opengm::export_grid_search_learner<op::GmAdderGeneralizedHammingLossDataset>("GridSearch_FlexibleLoss");
-    
+    opengm::export_grid_search_learner<op::GmAdderFlexibleLossDataset>("GridSearch_FlexibleLoss");
+    opengm::export_struct_perceptron_learner<op::GmAdderFlexibleLossDataset>("StructPerceptron_FlexibleLoss");
+    opengm::export_subgradient_ssvm_learner<op::GmAdderFlexibleLossDataset>("SubgradientSSVM_FlexibleLoss");
 
-    opengm::export_struct_perceptron_learner<op::GmAdderHammingLossDataset>("StructPerceptron_HammingLoss");
-    opengm::export_struct_perceptron_learner<op::GmAdderGeneralizedHammingLossDataset>("StructPerceptron_GeneralizedHammingLoss");
-    //opengm::export_struct_perceptron_learner<op::GmAdderGeneralizedHammingLossDataset>("StructPerceptron_FlexibleLoss");
-    
-    opengm::export_subgradient_ssvm_learner<op::GmAdderHammingLossDataset>("SubgradientSSVM_HammingLoss");
-    opengm::export_subgradient_ssvm_learner<op::GmAdderGeneralizedHammingLossDataset>("SubgradientSSVM_GeneralizedHammingLoss");
 
-    opengm::export_max_likelihood_learner<op::GmAdderHammingLossDataset>("MaxLikelihood_HammingLoss");
-    opengm::export_max_likelihood_learner<op::GmAdderGeneralizedHammingLossDataset>("MaxLikelihood_GeneralizedHammingLoss");
+    //opengm::export_max_likelihood_learner<op::GmAdderFlexibleLossDataset>("MaxLikelihood_FlexibleLoss");
+
     
     #if defined(WITH_CPLEX) || defined(WITH_GUROBI)
-        opengm::export_struct_max_margin_bundle_learner< op::GmAdderHammingLossDataset, ol::BundleOptimizer<op::GmValueType> >("StructMaxMargin_Bundle_HammingLoss");
-        opengm::export_struct_max_margin_bundle_learner< op::GmAdderGeneralizedHammingLossDataset, ol::BundleOptimizer<op::GmValueType> >("StructMaxMargin_Bundle_GeneralizedHammingLoss");
+        opengm::export_struct_max_margin_bundle_learner< op::GmAdderFlexibleLossDataset, ol::BundleOptimizer<op::GmValueType> >("StructMaxMargin_Bundle_FlexibleLoss");
     #endif
 }
diff --git a/src/interfaces/python/opengm/learning/pyDataset.cxx b/src/interfaces/python/opengm/learning/pyDataset.cxx
index 5eae4a4..5d8068a 100644
--- a/src/interfaces/python/opengm/learning/pyDataset.cxx
+++ b/src/interfaces/python/opengm/learning/pyDataset.cxx
@@ -84,8 +84,10 @@ void export_dataset(const std::string& className){
            .def("getNumberOfModels", &PyDataset::getNumberOfModels)
            .def("setInstance", &pySetInstance<GM,LOSS>)
            .def("setInstanceWithLossParam", &pySetInstanceWithLossParam<GM,LOSS>)
+           .def("setInstance", &pySetInstanceWithLossParam<GM,LOSS>)
            .def("pushBackInstance", &pyPushBackInstance<GM,LOSS>)
            .def("pushBackInstanceWithLossParam", &pyPushBackInstanceWithLossParam<GM,LOSS>)
+           .def("pushBackInstance", &pyPushBackInstanceWithLossParam<GM,LOSS>)
            .def("setWeights", &PyDataset::setWeights)
            .def("save", &pySaveDataset<GM, LOSS>)
            .def("load", &pyLoadDataset<GM, LOSS>)
@@ -95,8 +97,8 @@ void export_dataset(const std::string& className){
 }
 
 
-template void export_dataset<opengm::python::GmAdder, opengm::learning::HammingLoss> (const std::string& className);
+//template void export_dataset<opengm::python::GmAdder, opengm::learning::HammingLoss> (const std::string& className);
 //template void export_dataset<opengm::python::GmAdder, opengm::learning::NoLoss> (const std::string& className);
-template void export_dataset<opengm::python::GmAdder, opengm::learning::GeneralizedHammingLoss> (const std::string& className);
+template void export_dataset<opengm::python::GmAdder, opengm::learning::FlexibleLoss> (const std::string& className);
 
 }
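
The duplicated .def registrations above make both variants reachable
under a single name through boost::python overload resolution. On the
Python side this reads as (a sketch, assuming the usual (gm, gt)
argument order of the existing wrappers):

    dataset.pushBackInstance(gm, gt)             # default loss parameter
    dataset.pushBackInstance(gm, gt, lossParam)  # per-instance loss parameter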
diff --git a/src/interfaces/python/opengm/learning/pyGridSearchLearner.cxx b/src/interfaces/python/opengm/learning/pyGridSearchLearner.cxx
index 9e4eccf..412117c 100644
--- a/src/interfaces/python/opengm/learning/pyGridSearchLearner.cxx
+++ b/src/interfaces/python/opengm/learning/pyGridSearchLearner.cxx
@@ -55,10 +55,10 @@ namespace opengm{
     }
 
     template void 
-    export_grid_search_learner<op::GmAdderHammingLossDataset> (const std::string& className);
+    export_grid_search_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
 
-    template void 
-    export_grid_search_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
+    //template void 
+    //export_grid_search_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
 }
 
 
diff --git a/src/interfaces/python/opengm/learning/pyLoss.cxx b/src/interfaces/python/opengm/learning/pyLoss.cxx
index d9b84c8..614620c 100644
--- a/src/interfaces/python/opengm/learning/pyLoss.cxx
+++ b/src/interfaces/python/opengm/learning/pyLoss.cxx
@@ -3,9 +3,9 @@
 #include <stdexcept>
 #include <stddef.h>
 
-#include <opengm/learning/loss/hammingloss.hxx>
-#include <opengm/learning/loss/generalized-hammingloss.hxx>
-#include <opengm/learning/loss/noloss.hxx>
+//#include <opengm/learning/loss/hammingloss.hxx>
+//#include <opengm/learning/loss/generalized-hammingloss.hxx>
+#include <opengm/learning/loss/flexibleloss.hxx>
 #include <opengm/python/opengmpython.hxx>
 #include <opengm/python/converter.hxx>
 
@@ -13,67 +13,66 @@ using namespace boost::python;
 
 namespace opengm{
     
-void pySetNodeLossMultiplier(opengm::learning::GeneralizedHammingLoss::Parameter& p,
+void pySetNodeLossMultiplier(opengm::learning::FlexibleLoss::Parameter& p,
                              const opengm::python::NumpyView<double,1>& m)
 {
     p.nodeLossMultiplier_ = std::vector<double>(m.begin(), m.end());
 }
 
-void pySetLabelLossMultiplier(opengm::learning::GeneralizedHammingLoss::Parameter& p,
+void pySetLabelLossMultiplier(opengm::learning::FlexibleLoss::Parameter& p,
                              const opengm::python::NumpyView<double,1>& m)
 {
     p.labelLossMultiplier_ = std::vector<double>(m.begin(), m.end());
 }
+void pySetFactorLossMultiplier(opengm::learning::FlexibleLoss::Parameter& p,
+                               const opengm::python::NumpyView<double,1>& m)
+{
+    p.factorMultipier_ = std::vector<double>(m.begin(), m.end());
+}
+
 
 template <class GM>
 void export_loss(){
    typedef typename std::vector<typename GM::LabelType>::const_iterator Literator;
    typedef typename std::vector<typename GM::LabelType>::const_iterator Niterator;
    typedef opengm::learning::HammingLoss PyHammingLoss;
+   typedef opengm::learning::FlexibleLoss PyFlexibleLoss;
    typedef opengm::learning::GeneralizedHammingLoss PyGeneralizedHammingLoss;
    typedef opengm::learning::NoLoss PyNoLoss;
 
 
 
-    typedef opengm::learning::GeneralizedHammingLoss::Parameter PyGeneralizedHammingLossParameter;
 
-    class_<PyHammingLoss >("HammingLoss")
-        //.def("loss", &PyHammingLoss::loss<const GM &, Literator,Literator>)
-        //.def("addLoss", &PyHammingLoss::addLoss<GM, Literator>)
-    ;
 
-    //class_<PyNoLoss >("NoLoss")
-    //    //.def("loss", &PyNoLoss::loss<const GM &,Literator,Literator>)
-    //    //.def("addLoss", &PyNoLoss::addLoss<GM, Literator>)
-    //;
 
-    class_<PyGeneralizedHammingLoss >("GeneralizedHammingLoss", init<PyGeneralizedHammingLossParameter>())
-        //.def("loss", &PyGeneralizedHammingLoss::loss<const GM &,Literator,Literator>)
-        //.def("addLoss", &PyGeneralizedHammingLoss::addLoss<GM, Literator>)
+    class_<PyFlexibleLoss >("FlexibleLoss")
+        //.def("loss", &PyHammingLoss::loss<const GM &, Literator,Literator>)
+        //.def("addLoss", &PyHammingLoss::addLoss<GM, Literator>)
     ;
 
-
-    class_<PyNoLoss::Parameter>("NoLossParameter")
+    // learner param enum
+    enum_<PyFlexibleLoss::Parameter::LossType>("LossType")
+      .value("hamming", PyFlexibleLoss::Parameter::Hamming)
+      .value("l1",  PyFlexibleLoss::Parameter::L1)
+      .value("l2",  PyFlexibleLoss::Parameter::L2)
+      .value("partiton",  PyFlexibleLoss::Parameter::Partition)
+      .value("ConfMat",  PyFlexibleLoss::Parameter::ConfMat)
     ;
 
-    class_<PyHammingLoss::Parameter>("HammingLossParameter")
-    ;
 
-    class_<PyGeneralizedHammingLossParameter>("GeneralizedHammingLossParameter")
+    class_<PyFlexibleLoss::Parameter>("FlexibleLossParameter")
+        .def_readwrite("lossType", &PyFlexibleLoss::Parameter::lossType_)
         .def("setNodeLossMultiplier", &pySetNodeLossMultiplier)
         .def("setLabelLossMultiplier", &pySetLabelLossMultiplier)
+        .def("setFactorLossMultiplier", &pySetFactorLossMultiplier)
     ;
 
-    //class_<std::vector< PyNoLoss::Parameter > >("NoLossParameterVector")
-    //    .def(vector_indexing_suite<std::vector< PyNoLoss::Parameter> >())
-    //;
-    class_<std::vector< PyHammingLoss::Parameter > >("HammingLossParameterVector")
-        .def(vector_indexing_suite<std::vector< PyHammingLoss::Parameter> >())
-    ;
-    class_<std::vector< PyGeneralizedHammingLoss::Parameter > >("GeneralizedHammingLossParameterVector")
-        .def(vector_indexing_suite<std::vector< PyGeneralizedHammingLoss::Parameter> >())
+
+    class_<std::vector< PyFlexibleLoss::Parameter > >("FlexibleLossParameterVector")
+        .def(vector_indexing_suite<std::vector< PyFlexibleLoss::Parameter> >())
     ;
 
+
 }
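
From Python, the exported enum and parameter class can also be driven
directly (a sketch; it assumes the _learning extension names are
re-exported by opengm.learning, as the wrapper __init__.py suggests):

    import numpy
    from opengm import learning

    param = learning.FlexibleLossParameter()
    param.lossType = learning.LossType.l2
    param.setNodeLossMultiplier(numpy.ones(numVars))  # numVars: model size (assumed)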
 
 
diff --git a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
index 0749629..4fcd459 100644
--- a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
+++ b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
@@ -43,11 +43,11 @@ namespace opengm{
         ;
     }
 
-    template void
-    export_max_likelihood_learner<op::GmAdderHammingLossDataset> (const std::string& className);
+    //template void
+    //export_max_likelihood_learner<op::GmAdderHammingLossDataset> (const std::string& className);
 
     template void
-    export_max_likelihood_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
+    export_max_likelihood_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
 }
 
 
diff --git a/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx b/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
index 271ec1e..e8d5ba7 100644
--- a/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
+++ b/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
@@ -55,10 +55,8 @@ namespace opengm{
     }
 
     template void
-    export_struct_max_margin_bundle_learner<op::GmAdderHammingLossDataset, ol::BundleOptimizer<op::GmValueType> > (const std::string& className);
+    export_struct_max_margin_bundle_learner<op::GmAdderFlexibleLossDataset, ol::BundleOptimizer<op::GmValueType> > (const std::string& className);
 
-    template void
-    export_struct_max_margin_bundle_learner<op::GmAdderGeneralizedHammingLossDataset, ol::BundleOptimizer<op::GmValueType> > (const std::string& className);
 }
 
 
diff --git a/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx b/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
index f5554a7..6e3633e 100644
--- a/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
+++ b/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
@@ -66,10 +66,10 @@ namespace opengm{
     }
 
     template void 
-    export_struct_perceptron_learner<op::GmAdderHammingLossDataset> (const std::string& className);
+    export_struct_perceptron_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
 
-    template void 
-    export_struct_perceptron_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
+    // template void 
+    // export_struct_perceptron_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
 }
 
 
diff --git a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx b/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
index 2ca92c9..4de1360 100644
--- a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
+++ b/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
@@ -65,11 +65,14 @@ namespace opengm{
         ;
     }
 
-    template void 
-    export_subgradient_ssvm_learner<op::GmAdderHammingLossDataset> (const std::string& className);
+    // template void 
+    // export_subgradient_ssvm_learner<op::GmAdderHammingLossDataset> (const std::string& className);
+
+    // template void 
+    // export_subgradient_ssvm_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
 
     template void 
-    export_subgradient_ssvm_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
+    export_subgradient_ssvm_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
 }
 
 

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


