[opengm] 193/386: started to add rebinding to all inference methods

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:38 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 0320870c507c6159b4f73ee0a11bfae6c5437826
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Tue Dec 23 15:25:29 2014 +0100

    started to add rebinding to all inference methods
---
 .gitignore                                         |   7 +
 include/opengm/functions/learnable/lunary.hxx      | 313 +++++++++++++++------
 include/opengm/inference/alphabetaswap.hxx         |  33 ++-
 include/opengm/inference/alphaexpansion.hxx        |  85 ++++--
 include/opengm/inference/alphaexpansionfusion.hxx  |  75 +++--
 include/opengm/inference/astar.hxx                 | 129 +++++----
 include/opengm/inference/bruteforce.hxx            |  21 +-
 include/opengm/inference/combilp.hxx               |  11 +
 .../dualdecomposition/dualdecomposition_bundle.hxx |  28 ++
 .../dualdecomposition_subgradient.hxx              |  37 ++-
 include/opengm/inference/dynamicprogramming.hxx    |  20 ++
 include/opengm/inference/external/qpbo.hxx         |  35 ++-
 include/opengm/inference/icm.hxx                   |  74 +++--
 include/opengm/learning/dataset/dataset.hxx        |   6 +
 .../opengm/learning/dataset/editabledataset.hxx    |   2 +
 .../learning/loss/generalized-hammingloss.hxx      |  20 +-
 include/opengm/python/opengmpython.hxx             |   4 -
 src/interfaces/python/opengm/functionhelper.py     |   4 +-
 src/interfaces/python/opengm/learning/__init__.py  |  94 +++++--
 src/interfaces/python/opengm/learning/learning.cxx |   6 +-
 .../python/opengm/learning/pyDataset.cxx           |  17 +-
 .../python/opengm/learning/pyLFunctionGen.cxx      |  48 +++-
 .../python/opengm/opengmcore/__init__.py           |   9 -
 .../python/opengm/opengmcore/function_injector.py  |  11 +-
 .../python/opengm/opengmcore/pyFunctionTypes.cxx   |  26 +-
 src/interfaces/python/opengm/opengmcore/pyGm.cxx   |   8 -
 26 files changed, 762 insertions(+), 361 deletions(-)

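The central pattern in this commit: every inference class gains nested RebindGm and RebindGmAndAcc metafunctions, so generic code (here, the learning framework) can re-instantiate a user-chosen solver over a different graphical-model type (for example, a loss-augmented copy of a training model) and, optionally, a different accumulator. A minimal sketch of the convention, with hypothetical names (SomeSolver, LossAugmentedSolver):

    // Every inference class exposes two nested metafunctions:
    template<class GM, class ACC>
    class SomeSolver {
    public:
        template<class _GM>
        struct RebindGm {            // same solver, other model type
            typedef SomeSolver<_GM, ACC> type;
        };
        template<class _GM, class _ACC>
        struct RebindGmAndAcc {      // same solver, other model + accumulator
            typedef SomeSolver<_GM, _ACC> type;
        };
    };

    // Generic code can then re-instantiate whatever solver it was handed:
    template<class SOLVER, class LOSS_GM>
    struct LossAugmentedSolver {
        typedef typename SOLVER::template RebindGm<LOSS_GM>::type type;
    };

Wrapper solvers (AlphaBetaSwap, AlphaExpansion, the dual-decomposition methods) forward the rebind to their inner solver via INF::template RebindGm<_GM>::type, and every Parameter gains a templated copy constructor so a rebound solver can be configured from the original solver's parameters.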
diff --git a/.gitignore b/.gitignore
index 44a7094..29d22cb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,10 @@
 
 *~
 *.swp
+
+pascal_voc_val_gm_*.h5
+pascal_voc_train_gm*.h5
+pascal_voc_train_info.h5
+pascal_voc_val_info.h5
+data_train.pickle
+data_val.pickle
diff --git a/include/opengm/functions/learnable/lunary.hxx b/include/opengm/functions/learnable/lunary.hxx
index df7b6c5..89b8c93 100644
--- a/include/opengm/functions/learnable/lunary.hxx
+++ b/include/opengm/functions/learnable/lunary.hxx
@@ -22,7 +22,7 @@ namespace learnable {
 template<class V, class I>
 struct FeaturesAndIndices{
     std::vector<V> features;
-    std::vector<I> indices;
+    std::vector<I> weightIds;
 };
 
 
@@ -43,6 +43,16 @@ public:
         const opengm::learning::Weights<T>&     weights,
         std::vector<FeaturesAndIndices<T, I> >  featuresAndIndicesPerLabel
     );
+
+    LUnary(
+        const opengm::learning::Weights<T>& weights,    
+        const LabelType                     numberOfLabels,
+        marray::Marray< size_t >            weightIds,
+        marray::Marray< ValueType>          features,
+        const bool                          makeFirstEntryConst
+    );
+
+
     L shape(const size_t) const;
     size_t size() const;
     size_t dimension() const;
@@ -54,7 +64,7 @@ public:
     }
 
     size_t numberOfWeights()const{
-        return numWeights_;
+        return weightIds_.size();
     }
 
     I weightIndex(const size_t weightNumber) const{
@@ -65,20 +75,108 @@ public:
     T weightGradient(size_t,ITERATOR) const;
 
 private:
-    bool isMatchingWeight(const LabelType l , const size_t i ){
 
-    }
+
 protected:
+
+    size_t numWeightsForL(const LabelType l )const{
+        return offsets_[0*numberOfLabels_ + l];
+    }
+    size_t weightIdOffset(const LabelType l )const{
+        return offsets_[1*numberOfLabels_ + l];
+    }
+    size_t featureOffset(const LabelType l )const{
+        return offsets_[2*numberOfLabels_ + l];
+    }
+
     mutable const opengm::learning::Weights<T> *    weights_;
-    std::vector<size_t> labelOffset_;
+
+    opengm::UInt16Type numberOfLabels_;
+    std::vector<opengm::UInt16Type> offsets_;
     std::vector<size_t> weightIds_;
-    std::vector<V>      features_;
-    size_t numWeights_;
+    std::vector<ValueType> features_;
+
+
     friend class opengm::FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >;
 
 
 };
 
+template <class T, class I, class L>
+LUnary<T, I, L>::LUnary(
+    const opengm::learning::Weights<T>& weights,    
+    const LabelType                     numberOfLabels,
+    marray::Marray< size_t >            weightIds,
+    marray::Marray< ValueType>          features,
+    const bool                          makeFirstEntryConst
+)
+:  
+weights_(&weights),
+numberOfLabels_(numberOfLabels), 
+offsets_(numberOfLabels*3),
+weightIds_(),
+features_()
+{
+    const size_t pFeatDim       = features.dimension();
+    const size_t pWeightIdDim   = weightIds.dimension();
+
+    OPENGM_CHECK_OP(weightIds.dimension(), ==, 2 , "wrong dimension");
+    OPENGM_CHECK_OP(weightIds.shape(0)+int(makeFirstEntryConst), ==, numberOfLabels , "wrong shape");
+
+
+    const size_t nWeights = weightIds.size();
+    weightIds_.resize(nWeights);
+
+    const size_t nFeat  = features.size();
+    features_.resize(nFeat);
+
+
+    OPENGM_CHECK_OP(features.dimension(), == , 1 , "feature dimension must be 1");
+    OPENGM_CHECK_OP(features.shape(0), == , weightIds.shape(1) , "features.shape(0) must be equal to weightIds.shape(1)");
+
+    // copy features
+    for(size_t fi=0; fi<nFeat; ++fi){
+        features_[fi] = features(fi);
+    }
+
+    size_t nwForL = weightIds.shape(1);
+    size_t wOffset = 0;
+
+    if(makeFirstEntryConst){
+
+        OPENGM_CHECK_OP(numberOfLabels_-1, == , weightIds.shape(0),"internal error");
+
+        offsets_[0*numberOfLabels_ + 0] = 0;
+        offsets_[1*numberOfLabels_ + 0] = 0;
+        offsets_[2*numberOfLabels_ + 0] = 0;
+
+        for(LabelType l=1; l<numberOfLabels_; ++l){
+            offsets_[0*numberOfLabels_ + l] = nwForL;
+            offsets_[1*numberOfLabels_ + l] = wOffset;
+            offsets_[2*numberOfLabels_ + l] = 0;
+            // copy weight ids
+            for(size_t wi=0; wi<nwForL; ++wi){
+                weightIds_[wOffset + wi] = weightIds(l-1,wi);
+            }
+            wOffset += nwForL;
+        }
+    }
+    else{
+        OPENGM_CHECK_OP(numberOfLabels_, == , weightIds.shape(0),"internal error");
+        for(LabelType l=0; l<numberOfLabels_; ++l){
+
+            offsets_[0*numberOfLabels_ + l] = nwForL;
+            offsets_[1*numberOfLabels_ + l] = wOffset;
+            offsets_[2*numberOfLabels_ + l] = 0;
+            // copy weight ids
+            for(size_t wi=0; wi<nwForL; ++wi){
+                weightIds_[wOffset + wi] = weightIds(l,wi);
+            }
+            wOffset += nwForL;
+        }
+    }
+
+}
 
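The three accessors numWeightsForL, weightIdOffset and featureOffset defined above index a single flat vector: entries [0, L) of offsets_ hold the per-label weight counts, [L, 2L) the offsets into weightIds_, and [2L, 3L) the offsets into features_. A worked example of what this constructor produces (illustrative numbers, makeFirstEntryConst = true):

    // numberOfLabels = 3, weightIds is a 2x2 Marray (rows map to labels 1..2),
    // features has 2 entries shared by all labels:
    //   offsets_   = { 0, 2, 2,     // numWeightsForL(l)
    //                  0, 0, 2,     // weightIdOffset(l)
    //                  0, 0, 0 };   // featureOffset(l), features are shared
    //   weightIds_ = { w(0,0), w(0,1), w(1,0), w(1,1) };
    //   features_  = { f0, f1 };
    // Label 0 carries no weights, so operator()(l=0) evaluates to 0.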
 template <class T, class I, class L>
 inline
@@ -88,39 +186,50 @@ LUnary<T, I, L>::LUnary
    std::vector<FeaturesAndIndices<V, I> >  featuresAndIndicesPerLabel = std::vector<FeaturesAndIndices<V, I> >()
 )
 :  
-weights_(&weights), 
-labelOffset_(featuresAndIndicesPerLabel.size(),0), 
+weights_(&weights),
+numberOfLabels_(featuresAndIndicesPerLabel.size()), 
+offsets_(featuresAndIndicesPerLabel.size()*3),
 weightIds_(),
-features_(),
-numWeights_(0){
+features_()
+{
 
-    // collect how many weights there are at all 
-    // for this function
-    size_t offset = 0 ;
-    for(size_t l=0; l<featuresAndIndicesPerLabel.size(); ++l){
-        const size_t nForThisL = featuresAndIndicesPerLabel[l].features.size();
-        numWeights_ += nForThisL;
-    }
+    size_t fOffset = 0;
+    size_t wOffset = 0;
 
-    weightIds_.resize(numWeights_);
-    features_.resize(numWeights_);
 
+    // fetch the offsets
     for(size_t l=0; l<featuresAndIndicesPerLabel.size(); ++l){
-        labelOffset_[l] = offset;
-        const size_t nForThisL = featuresAndIndicesPerLabel[l].features.size();
-        for(size_t i=0; i<nForThisL; ++i){
+        const size_t nwForL  = featuresAndIndicesPerLabel[l].weightIds.size();
+        const size_t nfForL  = featuresAndIndicesPerLabel[l].features.size();
+        OPENGM_CHECK_OP(nwForL, == , nfForL, "number of features and weights "
+            "must be the same for a given label within this overload of LUnary<T, I, L>::LUnary");
+
+        offsets_[0*numberOfLabels_ + l] = nwForL;
+        offsets_[1*numberOfLabels_ + l] = wOffset;
+        offsets_[2*numberOfLabels_ + l] = fOffset;
 
-            // as many features as labels
-            OPENGM_CHECK_OP( featuresAndIndicesPerLabel[l].indices.size(), == ,
-                             featuresAndIndicesPerLabel[l].features.size() ,
-                             "features and weights must be of same length");
+        wOffset += nwForL;
+        fOffset += nfForL;
+    }
 
-            weightIds_[offset + i] = featuresAndIndicesPerLabel[l].indices[i];
-            features_[offset + i] = featuresAndIndicesPerLabel[l].features[i];
+    weightIds_.resize(wOffset);
+    features_.resize(fOffset);
 
+    // write weightIDs and features
+    for(size_t l=0; l<featuresAndIndicesPerLabel.size(); ++l){
+        const size_t nwForL = numWeightsForL(l);
+        for(size_t i=0; i<nwForL; ++i){
+            weightIds_[weightIdOffset(l)+i] = featuresAndIndicesPerLabel[l].weightIds[i];
+            features_[featureOffset(l)+i] = featuresAndIndicesPerLabel[l].features[i];
         }
-        offset+=nForThisL;
     }
+
+    // check that there are no duplicates
+    RandomAccessSet<size_t> idSet;
+    idSet.reserve(weightIds_.size());
+    idSet.insert(weightIds_.begin(), weightIds_.end());
+
+    OPENGM_CHECK_OP(idSet.size(), == , weightIds_.size(), "weightIds has duplicates");
 }
 
 
@@ -135,28 +244,17 @@ LUnary<T, I, L>::weightGradient
 ) const {
     OPENGM_CHECK_OP(weightNumber,<,numberOfWeights(), 
         "weightNumber must be smaller than number of weights");
-    const L l = *begin;
-
-    if(l == size()-1){
-        if(labelOffset_[l]>labelOffset_[l-1]){
-            size_t start = labelOffset_[l];
-            if(weightNumber>=start){
-                return features_[weightNumber];
-            }
-        }
-        else{
-            return V(0);
-        }
-    }
-    else{
-        size_t start = labelOffset_[l];
-        size_t   end = labelOffset_[l+1];
-        if(weightNumber>= start && weightNumber<end){
-            return features_[weightNumber];
+    const LabelType l(*begin);
+    const size_t nwForL = numWeightsForL(l);
+    if(nwForL>0){
+        const size_t wiStart = weightIdOffset(l);
+        const size_t wiEnd   = weightIdOffset(l)+nwForL;
+        if(weightNumber >= wiStart && weightNumber < wiEnd ){
+            const size_t wii = weightNumber - wiStart;
+            return features_[featureOffset(l) + wii];
         }
     }
-    return V(0);
-
+    return static_cast<ValueType>(0);
 }
 
 template <class T, class I, class L>
@@ -166,11 +264,14 @@ LUnary<T, I, L>::operator()
 (
    ITERATOR begin
 ) const {
+
+
     T val = 0;
-    const size_t oBegin = labelOffset_[*begin];
-    const size_t oEnd = (*begin == size()-1 ? numberOfWeights() : labelOffset_[*begin+1] );
-    for(size_t i=oBegin;i<oEnd;++i){
-        val += weights_->getWeight(weightIds_[i]) * features_[i];
+    const LabelType l(*begin);
+    const size_t nwForL = numWeightsForL(l);
+    for(size_t i=0; i<nwForL; ++i){
+        const size_t wi = weightIds_[weightIdOffset(l)+i];
+        val += weights_->getWeight(wi) * features_[featureOffset(l)+i];
     }
     return val;
 }
@@ -182,7 +283,7 @@ LUnary<T, I, L>::shape
 (
    const size_t i
 ) const {
-   return labelOffset_.size();
+   return numberOfLabels_;
 }
 
 template <class T, class I, class L>
@@ -194,7 +295,7 @@ LUnary<T, I, L>::dimension() const {
 template <class T, class I, class L>
 inline size_t
 LUnary<T, I, L>::size() const {
-   return labelOffset_.size();
+   return numberOfLabels_;
 }
 
 } // namespace learnable
@@ -228,7 +329,16 @@ FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::indexSequ
 (
    const opengm::functions::learnable::LUnary<T, I, L> & src
 ) {
-  return 2 + src.size() + src.numberOfWeights(); 
+
+    size_t size = 0;
+    size += 1; // numberOfLabels
+    size += 1; // numberOfWeights
+    size += 1; // numberOfFeatures
+
+    size += 3*src.shape(0);         // offsets serialization 
+    size += src.weightIds_.size();  // weight id serialization
+
+    return size;
 }
 
 template<class T, class I, class L>
@@ -237,7 +347,7 @@ FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::valueSequ
 (
    const opengm::functions::learnable::LUnary<T, I, L> & src
 ) {
-  return src.numberOfWeights();
+  return src.features_.size(); // feature serialization
 }
 
 template<class T, class I, class L>
@@ -245,30 +355,46 @@ template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR >
 inline void
 FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::serialize
 (
-   const opengm::functions::learnable::LUnary<T, I, L> & src,
-   INDEX_OUTPUT_ITERATOR indexOutIterator,
-   VALUE_OUTPUT_ITERATOR valueOutIterator
+    const opengm::functions::learnable::LUnary<T, I, L> & src,
+    INDEX_OUTPUT_ITERATOR indexOutIterator,
+    VALUE_OUTPUT_ITERATOR valueOutIterator
 ) {
-   *indexOutIterator = src.size();
-   ++indexOutIterator; 
 
-   *indexOutIterator = src.numberOfWeights();
-   ++indexOutIterator;
+    ///////////////////////////////////////
+    /// INDEX SERIALIZATION
+    ////////////////////////////////////////
+    // number of labels
+    *indexOutIterator = src.shape(0);
+    ++indexOutIterator; 
+
+    // number of weights
+    *indexOutIterator = src.weightIds_.size();
+    ++indexOutIterator; 
+    
+    // number of features
+    *indexOutIterator = src.features_.size();
+    ++indexOutIterator; 
 
-    for(size_t l=0; l<src.size(); ++l){
-        *indexOutIterator = src.labelOffset_[l];
-        ++indexOutIterator; 
+    // offset serialization
+    for(size_t i=0; i<src.offsets_.size(); ++i){
+        *indexOutIterator = src.offsets_[i];
+        ++indexOutIterator;
     }
 
-    for(size_t i=0; i<src.numberOfWeights(); ++i){
+    // weight id serialization
+    for(size_t i=0; i<src.weightIds_.size(); ++i){
         *indexOutIterator = src.weightIds_[i];
         ++indexOutIterator;
+    }
 
+    ///////////////////////////////////////
+    /// VALUE SERIALIZATION
+    ////////////////////////////////////////
+    // feature serialization
+    for(size_t i=0; i<src.features_.size(); ++i){
         *valueOutIterator = src.features_[i];
         ++valueOutIterator;
     }
-
-    
 }
 
 template<class T, class I, class L>
@@ -280,32 +406,51 @@ FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::deseriali
    VALUE_INPUT_ITERATOR valueInIterator,
    opengm::functions::learnable::LUnary<T, I, L> & dst
 ) { 
-    const size_t numLabels = *indexInIterator;
+
+
+
+    ///////////////////////////////////////
+    /// INDEX DESERIALIZATION
+    ////////////////////////////////////////
+    // number of labels
+    dst.numberOfLabels_ = *indexInIterator;
     ++indexInIterator;
+    // resize offset accordingly
+    dst.offsets_.resize(3 * dst.numberOfLabels_);
+
 
-    dst.numWeights_ = *indexInIterator;
+    // number of weights
+    const size_t nW = *indexInIterator;
     ++indexInIterator;
+    // resize weightIds accordingly
+    dst.weightIds_.resize(nW);
 
-    dst.labelOffset_.resize(numLabels);
-    dst.weightIds_.resize(dst.numWeights_);
-    dst.features_.resize(dst.numWeights_);
+    // number of features
+    const size_t nF = *indexInIterator;
+    ++indexInIterator;
+    // resize features accordingly
+    dst.features_.resize(nF);
 
-    // label offset
-    for(size_t l=0; l<numLabels; ++l){
-        dst.labelOffset_[l] = *indexInIterator;
+    // offset deserialization
+    for(size_t i=0; i<dst.offsets_.size(); ++i){
+        dst.offsets_[i] = *indexInIterator;
         ++indexInIterator;
     }
 
-    for(size_t i=0; i<dst.numWeights_; ++i){
+    // weight id deserialization
+    for(size_t i=0; i<dst.weightIds_.size(); ++i){
         dst.weightIds_[i] = *indexInIterator;
         ++indexInIterator;
+    }
 
+    ///////////////////////////////////////
+    /// VALUE DESERIALIZATION
+    ////////////////////////////////////////
+    // feature deserialization
+    for(size_t i=0; i<dst.features_.size(); ++i){
         dst.features_[i] = *valueInIterator;
         ++valueInIterator;
-    }
-
-
- 
+    } 
 }
 
 } // namespace opengm
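For reference, the index and value sequences written by the new serialize above have the following layout (L, nW, nF as in the code; a sketch, not normative):

    // index sequence                              value sequence
    // [0]               numberOfLabels_   (L)     [0 .. nF)  features_
    // [1]               weightIds_.size() (nW)
    // [2]               features_.size()  (nF)
    // [3 .. 3+3L)       offsets_ (counts, weight-id offsets, feature offsets)
    // [3+3L .. 3+3L+nW) weightIds_
    //
    // so indexSequenceSize() = 3 + 3L + nW and valueSequenceSize() = nF,
    // matching the two size functions above.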
diff --git a/include/opengm/inference/alphabetaswap.hxx b/include/opengm/inference/alphabetaswap.hxx
index c32461b..55c3eb0 100644
--- a/include/opengm/inference/alphabetaswap.hxx
+++ b/include/opengm/inference/alphabetaswap.hxx
@@ -22,14 +22,33 @@ public:
    typedef opengm::visitors::EmptyVisitor<AlphaBetaSwap<GM,INF> >   EmptyVisitorType;
    typedef opengm::visitors::TimingVisitor<AlphaBetaSwap<GM,INF> >  TimingVisitorType;
 
-   struct Parameter {
-      Parameter() {
-         maxNumberOfIterations_ = 1000;
-      }
 
-      typename InferenceType::Parameter parameter_; 
-      size_t maxNumberOfIterations_; 
-   };
+    template<class _GM>
+    struct RebindGm{
+        typedef typename INF:: template RebindGm<_GM>::type RebindedInf;
+        typedef AlphaBetaSwap<_GM, RebindedInf> type;
+    };
+
+    template<class _GM,class _ACC>
+    struct RebindGmAndAcc{
+        typedef typename INF:: template RebindGmAndAcc<_GM,_ACC>::type RebindedInf;
+        typedef AlphaBetaSwap<_GM, RebindedInf> type;
+    };
+
+
+    struct Parameter {
+        Parameter() {
+            maxNumberOfIterations_ = 1000;
+        }
+        template<class P>
+        Parameter(const P & p)
+        :   parameter_(p.parameter_),
+            maxNumberOfIterations_(p.maxNumberOfIterations_){
+        }
+
+        typename InferenceType::Parameter parameter_; 
+        size_t maxNumberOfIterations_; 
+    };
 
    AlphaBetaSwap(const GraphicalModelType&, Parameter = Parameter());
    std::string name() const;
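The templated Parameter(const P &) constructors added throughout this commit are what make the rebinding usable in practice: a rebound solver can be configured directly from the original solver's parameter object. Roughly (a sketch; Gm, OtherGm, Inf and otherGm are hypothetical):

    typedef AlphaBetaSwap<Gm, Inf> Solver;
    typedef Solver::RebindGm<OtherGm>::type ReboundSolver;

    Solver::Parameter p;
    p.maxNumberOfIterations_ = 100;
    ReboundSolver::Parameter p2(p);      // field-by-field conversion
    ReboundSolver solver(otherGm, p2);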
diff --git a/include/opengm/inference/alphaexpansion.hxx b/include/opengm/inference/alphaexpansion.hxx
index a1b9d04..6866ad9 100644
--- a/include/opengm/inference/alphaexpansion.hxx
+++ b/include/opengm/inference/alphaexpansion.hxx
@@ -22,35 +22,62 @@ public:
    typedef visitors::EmptyVisitor<AlphaExpansion<GM,INF> >   EmptyVisitorType;
    typedef visitors::TimingVisitor<AlphaExpansion<GM,INF> >  TimingVisitorType;
 
-   struct Parameter {
-      typedef typename InferenceType::Parameter InferenceParameter;
-      enum LabelingIntitialType {DEFAULT_LABEL, RANDOM_LABEL, LOCALOPT_LABEL, EXPLICIT_LABEL};
-      enum OrderType {DEFAULT_ORDER, RANDOM_ORDER, EXPLICIT_ORDER};
-
-      Parameter
-      (
-         const size_t maxNumberOfSteps  = 1000,
-         const InferenceParameter& para = InferenceParameter()
-      )
-      :  parameter_(para),
-         maxNumberOfSteps_(maxNumberOfSteps),
-         labelInitialType_(DEFAULT_LABEL),
-         orderType_(DEFAULT_ORDER),
-         randSeedOrder_(0),
-         randSeedLabel_(0),
-         labelOrder_(),
-         label_()
-      {}
-
-      InferenceParameter parameter_;
-      size_t maxNumberOfSteps_;
-      LabelingIntitialType labelInitialType_;
-      OrderType orderType_;
-      unsigned int randSeedOrder_;
-      unsigned int randSeedLabel_;
-      std::vector<LabelType> labelOrder_;
-      std::vector<LabelType> label_;
-   };
+    template<class _GM>
+    struct RebindGm{
+        typedef typename INF:: template RebindGm<_GM>::type RebindedInf;
+        typedef AlphaExpansion<_GM, RebindedInf> type;
+    };
+
+    template<class _GM,class _ACC>
+    struct RebindGmAndAcc{
+        typedef typename INF:: template RebindGmAndAcc<_GM,_ACC>::type RebindedInf;
+        typedef AlphaExpansion<_GM, RebindedInf> type;
+    };
+
+    struct Parameter {
+        typedef typename InferenceType::Parameter InferenceParameter;
+        enum LabelingIntitialType {DEFAULT_LABEL, RANDOM_LABEL, LOCALOPT_LABEL, EXPLICIT_LABEL};
+        enum OrderType {DEFAULT_ORDER, RANDOM_ORDER, EXPLICIT_ORDER};
+
+        Parameter
+        (
+            const size_t maxNumberOfSteps  = 1000,
+            const InferenceParameter& para = InferenceParameter()
+        )
+        :   parameter_(para),
+            maxNumberOfSteps_(maxNumberOfSteps),
+            labelInitialType_(DEFAULT_LABEL),
+            orderType_(DEFAULT_ORDER),
+            randSeedOrder_(0),
+            randSeedLabel_(0),
+            labelOrder_(),
+            label_()
+        {}
+
+        template<class P>
+        Parameter
+        (
+            const P & p
+        )
+        :   parameter_(p.parameter_),
+            maxNumberOfSteps_(p.maxNumberOfSteps_),
+            labelInitialType_(p.labelInitialType_),
+            orderType_(p.orderType_),
+            randSeedOrder_(p.randSeedOrder_),
+            randSeedLabel_(p.randSeedLabel_),
+            labelOrder_(p.labelOrder_),
+            label_(p.label_)
+        {}
+
+        InferenceParameter parameter_;
+        size_t maxNumberOfSteps_;
+        LabelingIntitialType labelInitialType_;
+        OrderType orderType_;
+        unsigned int randSeedOrder_;
+        unsigned int randSeedLabel_;
+        std::vector<LabelType> labelOrder_;
+        std::vector<LabelType> label_;
+    };
 
    AlphaExpansion(const GraphicalModelType&, Parameter para = Parameter());
 
diff --git a/include/opengm/inference/alphaexpansionfusion.hxx b/include/opengm/inference/alphaexpansionfusion.hxx
index 81f7e6a..9c26741 100644
--- a/include/opengm/inference/alphaexpansionfusion.hxx
+++ b/include/opengm/inference/alphaexpansionfusion.hxx
@@ -27,30 +27,57 @@ public:
    typedef visitors::EmptyVisitor<AlphaExpansionFusion<GM,ACC> >   EmptyVisitorType;
    typedef visitors::TimingVisitor<AlphaExpansionFusion<GM,ACC> >  TimingVisitorType;
 
-   struct Parameter {
-      enum LabelingIntitialType {DEFAULT_LABEL, RANDOM_LABEL, LOCALOPT_LABEL, EXPLICIT_LABEL};
-      enum OrderType {DEFAULT_ORDER, RANDOM_ORDER, EXPLICIT_ORDER};
-
-      Parameter
-      (
-         const size_t maxNumberOfSteps  = 1000
-      )
-      :  maxNumberOfSteps_(maxNumberOfSteps),
-         labelInitialType_(DEFAULT_LABEL),
-         orderType_(DEFAULT_ORDER),
-         randSeedOrder_(0),
-         randSeedLabel_(0),
-         labelOrder_(),
-         label_()
-      {}
-
-      size_t maxNumberOfSteps_;
-      LabelingIntitialType labelInitialType_;
-      OrderType orderType_;
-      unsigned int randSeedOrder_;
-      unsigned int randSeedLabel_;
-      std::vector<LabelType> labelOrder_;
-      std::vector<LabelType> label_;
+    template<class _GM>
+    struct RebindGm{
+        typedef AlphaExpansionFusion<_GM, ACC> type;
+    };
+
+    template<class _GM,class _ACC>
+    struct RebindGmAndAcc{
+        typedef AlphaExpansionFusion<_GM, _ACC> type;
+    };
+
+    struct Parameter {
+        enum LabelingIntitialType {DEFAULT_LABEL, RANDOM_LABEL, 
+                                   LOCALOPT_LABEL, EXPLICIT_LABEL};
+        enum OrderType {DEFAULT_ORDER, RANDOM_ORDER, 
+                        EXPLICIT_ORDER};
+
+
+        Parameter
+        (
+            const size_t maxNumberOfSteps  = 1000
+        )
+        :   maxNumberOfSteps_(maxNumberOfSteps),
+            labelInitialType_(DEFAULT_LABEL),
+            orderType_(DEFAULT_ORDER),
+            randSeedOrder_(0),
+            randSeedLabel_(0),
+            labelOrder_(),
+            label_()
+        {}
+
+        template<class P>
+        Parameter
+        (
+            const P & p
+        )
+        :   maxNumberOfSteps_(p.maxNumberOfSteps_),
+            labelInitialType_(p.labelInitialType_),
+            orderType_(p.orderType_),
+            randSeedOrder_(p.randSeedOrder_),
+            randSeedLabel_(p.randSeedLabel_),
+            labelOrder_(p.labelOrder_),
+            label_(p.label_)
+        {}
+
+        size_t maxNumberOfSteps_;
+        LabelingIntitialType labelInitialType_;
+        OrderType orderType_;
+        unsigned int randSeedOrder_;
+        unsigned int randSeedLabel_;
+        std::vector<LabelType> labelOrder_;
+        std::vector<LabelType> label_;
    };
 
    AlphaExpansionFusion(const GraphicalModelType&, Parameter para = Parameter());
diff --git a/include/opengm/inference/astar.hxx b/include/opengm/inference/astar.hxx
index c776e09..5e2dde4 100644
--- a/include/opengm/inference/astar.hxx
+++ b/include/opengm/inference/astar.hxx
@@ -66,7 +66,6 @@ namespace opengm {
    public:
       ///graphical model type
       typedef GM                                          GraphicalModelType;
-      // -- obsolet --  typedef typename GraphicalModelType::template Rebind<true>::RebindType EditableGraphicalModelType;
       ///accumulation type
       typedef ACC                                         AccumulationType;
       OPENGM_GM_TYPE_TYPEDEFS;
@@ -79,59 +78,81 @@ namespace opengm {
       typedef opengm::visitors::TimingVisitor<AStar<GM, ACC> > TimingVisitorType;
       typedef opengm::visitors::EmptyVisitor<AStar<GM, ACC> > EmptyVisitorType;
       
-      enum Heuristic{
-         DEFAULT_HEURISTIC = 0,
-         FAST_HEURISTIC = 1,
-         STANDARD_HEURISTIC = 2
-      };
-      struct Parameter {
-         Parameter()
-            {
-               maxHeapSize_    = 3000000;
-               numberOfOpt_    = 1;
-               objectiveBound_ = AccumulationType::template neutral<ValueType>();
-               heuristic_      = Parameter::DEFAULTHEURISTIC;
-            };
-            /// constuctor
-
-         /// \brief add tree factor id
-         /// \param id factor id
-         void addTreeFactorId(size_t id)
-            { treeFactorIds_.push_back(id); }
-         /// DEFAULTHEURISTIC ;
-         static const size_t DEFAULTHEURISTIC = 0;
-         /// FASTHEURISTIC
-         static const size_t FASTHEURISTIC = 1;
-         /// STANDARDHEURISTIC
-         static const size_t STANDARDHEURISTIC = 2;
-         /// maxHeapSize_ maximum size of the heap
-         size_t maxHeapSize_;
-         /// number od N-best solutions that should be found
-         size_t              numberOfOpt_;
-         /// objective bound
-         ValueType          objectiveBound_;
-         /// heuritstic
-         ///
-         /// DEFAULTHEURISTIC = 0;
-         /// FASTHEURISTIC = 1
-         /// STANDARDHEURISTIC = 2
-         size_t heuristic_;  
-         std::vector<IndexType> nodeOrder_;
-         std::vector<size_t> treeFactorIds_;
-       
-      };
-      AStar(const GM& gm, Parameter para = Parameter());
-      virtual std::string name() const {return "AStar";}
-      const GraphicalModelType& graphicalModel() const;
-      virtual InferenceTermination infer();
-      virtual void reset();
-      template<class VisitorType> InferenceTermination infer(VisitorType& vistitor);
-      ValueType bound()const {return belowBound_;}
-      ValueType value()const;
-      virtual InferenceTermination marginal(const size_t,IndependentFactorType& out)const        {return UNKNOWN;}
-      virtual InferenceTermination factorMarginal(const size_t, IndependentFactorType& out)const {return UNKNOWN;}
-      virtual InferenceTermination arg(std::vector<LabelType>& v, const size_t = 1)const;
-      virtual InferenceTermination args(std::vector< std::vector<LabelType> >& v)const;
+
+    template<class _GM>
+    struct RebindGm{
+        typedef AStar<_GM, ACC> type;
+    };
+
+    template<class _GM,class _ACC>
+    struct RebindGmAndAcc{
+        typedef AStar<_GM, _ACC > type;
+    };
+
+
+    enum Heuristic{
+        DEFAULT_HEURISTIC = 0,
+        FAST_HEURISTIC = 1,
+        STANDARD_HEURISTIC = 2
+    };
+
+    struct Parameter {
+        Parameter()
+        {
+            maxHeapSize_    = 3000000;
+            numberOfOpt_    = 1;
+            objectiveBound_ = AccumulationType::template neutral<ValueType>();
+            heuristic_      = Parameter::DEFAULTHEURISTIC;
+        };
+        
+        template<class P>
+        Parameter(const P & p )
+        :   maxHeapSize_(p.maxHeapSize_),
+            numberOfOpt_(p.numberOfOpt_),
+            objectiveBound_(p.objectiveBound_),
+            heuristic_(p.heuristic_),
+            nodeOrder_(p.nodeOrder_),
+            treeFactorIds_(p.treeFactorIds_){
+        }
+
+        /// \brief add tree factor id
+        /// \param id factor id
+        void addTreeFactorId(size_t id)
+        { treeFactorIds_.push_back(id); }
+        /// DEFAULTHEURISTIC
+        static const size_t DEFAULTHEURISTIC = 0;
+        /// FASTHEURISTIC
+        static const size_t FASTHEURISTIC = 1;
+        /// STANDARDHEURISTIC
+        static const size_t STANDARDHEURISTIC = 2;
+        /// maxHeapSize_ maximum size of the heap
+        size_t maxHeapSize_;
+        /// number of N-best solutions that should be found
+        size_t              numberOfOpt_;
+        /// objective bound
+        ValueType          objectiveBound_;
+        /// heuristic
+        ///
+        /// DEFAULTHEURISTIC = 0;
+        /// FASTHEURISTIC = 1
+        /// STANDARDHEURISTIC = 2
+        size_t heuristic_;  
+        std::vector<IndexType> nodeOrder_;
+        std::vector<size_t> treeFactorIds_;
+
+    };
+
+    AStar(const GM& gm, Parameter para = Parameter());
+    virtual std::string name() const {return "AStar";}
+    const GraphicalModelType& graphicalModel() const;
+    virtual InferenceTermination infer();
+    virtual void reset();
+    template<class VisitorType> InferenceTermination infer(VisitorType& visitor);
+    ValueType bound()const {return belowBound_;}
+    ValueType value()const;
+    virtual InferenceTermination marginal(const size_t,IndependentFactorType& out)const        {return UNKNOWN;}
+    virtual InferenceTermination factorMarginal(const size_t, IndependentFactorType& out)const {return UNKNOWN;}
+    virtual InferenceTermination arg(std::vector<LabelType>& v, const size_t = 1)const;
+    virtual InferenceTermination args(std::vector< std::vector<LabelType> >& v)const;
 
    private:
       const GM&                                   gm_;
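Unchanged in substance, but worth noting: AStar still supports N-best inference through numberOfOpt_, with the labelings retrieved via args(). A usage sketch (gm and its concrete type Gm are assumed to exist):

    AStar<Gm, opengm::Minimizer>::Parameter p;
    p.numberOfOpt_ = 3;                        // request the 3 best labelings
    AStar<Gm, opengm::Minimizer> solver(gm, p);
    solver.infer();
    std::vector<std::vector<Gm::LabelType> > best;
    solver.args(best);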
diff --git a/include/opengm/inference/bruteforce.hxx b/include/opengm/inference/bruteforce.hxx
index 1189486..2548293 100644
--- a/include/opengm/inference/bruteforce.hxx
+++ b/include/opengm/inference/bruteforce.hxx
@@ -23,7 +23,26 @@ public:
    typedef visitors::VerboseVisitor<Bruteforce<GM,ACC> > VerboseVisitorType;
    typedef visitors::EmptyVisitor<Bruteforce<GM,ACC> >   EmptyVisitorType;
    typedef visitors::TimingVisitor<Bruteforce<GM,ACC> >  TimingVisitorType;
-   class Parameter {};
+
+    template<class _GM>
+    struct RebindGm{
+        typedef Bruteforce<_GM, ACC> type;
+    };
+
+    template<class _GM,class _ACC>
+    struct RebindGmAndAcc{
+        typedef Bruteforce<_GM, _ACC > type;
+    };
+
+   struct Parameter {
+        Parameter(){
+
+        }
+        template<class P>
+        Parameter(const P & p){
+
+        }
+   };
 
    Bruteforce(const GraphicalModelType&);
    Bruteforce(const GraphicalModelType&, const Parameter&);
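Even option-free solvers such as Bruteforce get the templated converting constructor, so generic code never has to special-case them; a conversion helper can be written once (a sketch):

    // Works for every Parameter touched by this commit:
    template<class DST_INF, class SRC_PARAM>
    typename DST_INF::Parameter convertParameter(const SRC_PARAM & p){
        return typename DST_INF::Parameter(p);   // templated copy constructor
    }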
diff --git a/include/opengm/inference/combilp.hxx b/include/opengm/inference/combilp.hxx
index 632628d..639dbe9 100644
--- a/include/opengm/inference/combilp.hxx
+++ b/include/opengm/inference/combilp.hxx
@@ -413,6 +413,17 @@ namespace opengm{
       typedef ACC AccumulationType;
       typedef GM GraphicalModelType;
 
+        template<class _GM>
+        struct RebindGm{
+            typedef CombiLP<_GM, ACC, LPSOLVER> type;
+        };
+
+        template<class _GM,class _ACC>
+        struct RebindGmAndAcc{
+            typedef CombiLP<_GM, _ACC, LPSOLVER> type;
+        };
+
+
       OPENGM_GM_TYPE_TYPEDEFS;
       typedef visitors::VerboseVisitor<CombiLP<GM, ACC, LPSOLVER> > VerboseVisitorType;
       typedef visitors::EmptyVisitor<CombiLP<GM, ACC, LPSOLVER> >   EmptyVisitorType;
diff --git a/include/opengm/inference/dualdecomposition/dualdecomposition_bundle.hxx b/include/opengm/inference/dualdecomposition/dualdecomposition_bundle.hxx
index 1bee922..64a7dad 100644
--- a/include/opengm/inference/dualdecomposition/dualdecomposition_bundle.hxx
+++ b/include/opengm/inference/dualdecomposition/dualdecomposition_bundle.hxx
@@ -49,6 +49,20 @@ namespace opengm {
       typedef typename DDBaseType::SubVariableType               SubVariableType;
       typedef typename DDBaseType::SubVariableListType           SubVariableListType; 
 
+
+        template<class _GM>
+        struct RebindGm{
+            typedef typename INF:: template RebindGm<_GM>::type RebindedInf;
+            typedef DualDecompositionBundle<_GM, RebindedInf, DUALBLOCK> type;
+        };
+
+        template<class _GM,class _ACC>
+        struct RebindGmAndAcc{
+            typedef typename INF:: template RebindGmAndAcc<_GM,_ACC>::type RebindedInf;
+            typedef DualDecompositionBundle<_GM, RebindedInf, DUALBLOCK> type;
+        };
+
+
       class Parameter : public DualDecompositionBaseParameter{
       public: 
         /// The relative accuracy which has to be guaranteed to stop with an approximate solution (set 0 for optimality)
@@ -79,6 +93,20 @@ namespace opengm {
               noBundle_(false),
               useHeuristicStepsize_(true)
             {};
+
+        template<class P>
+        Parameter(const P & p)
+        :
+            minimalRelAccuracy_(p.minimalRelAccuracy_),
+            subPara_(p.subPara_),
+            relativeDualBoundPrecision_(p.relativeDualBoundPrecision_),
+            maxBundlesize_(p.maxBundlesize_),
+            activeBoundFixing_(p.activeBoundFixing_),
+            minDualWeight_(p.minDualWeight_),
+            maxDualWeight_(p.maxDualWeight_),
+            noBundle_(p.noBundle_),
+            useHeuristicStepsize_(p.useHeuristicStepsize_){
+        }
       };
 
       using  DualDecompositionBase<GmType, DualBlockType >::gm_;
diff --git a/include/opengm/inference/dualdecomposition/dualdecomposition_subgradient.hxx b/include/opengm/inference/dualdecomposition/dualdecomposition_subgradient.hxx
index 2bee7c0..ae3247d 100644
--- a/include/opengm/inference/dualdecomposition/dualdecomposition_subgradient.hxx
+++ b/include/opengm/inference/dualdecomposition/dualdecomposition_subgradient.hxx
@@ -43,14 +43,35 @@ namespace opengm {
       typedef typename DDBaseType::SubVariableType               SubVariableType;
       typedef typename DDBaseType::SubVariableListType           SubVariableListType; 
 
-      class Parameter : public DualDecompositionBaseParameter{
-      public:
-         /// Parameter for Subproblems
-         typename InfType::Parameter subPara_;
-         bool useAdaptiveStepsize_;
-         bool useProjectedAdaptiveStepsize_;
-         Parameter() : useAdaptiveStepsize_(false), useProjectedAdaptiveStepsize_(false){};
-      };
+        template<class _GM>
+        struct RebindGm{
+            typedef typename INF:: template RebindGm<_GM>::type RebindedInf;
+            typedef DualDecompositionSubGradient<_GM, RebindedInf, DUALBLOCK> type;
+        };
+
+        template<class _GM,class _ACC>
+        struct RebindGmAndAcc{
+            typedef typename INF:: template RebindGmAndAcc<_GM,_ACC>::type RebindedInf;
+            typedef DualDecompositionSubGradient<_GM, RebindedInf, DUALBLOCK> type;
+        };
+
+
+        class Parameter : public DualDecompositionBaseParameter{
+        public:
+            /// Parameter for Subproblems
+            typename InfType::Parameter subPara_;
+            bool useAdaptiveStepsize_;
+            bool useProjectedAdaptiveStepsize_;
+            Parameter() : useAdaptiveStepsize_(false), useProjectedAdaptiveStepsize_(false){};
+
+            template<class P>
+            Parameter(const P & p)
+            :   subPara_(p.subPara_),
+                useAdaptiveStepsize_(p.useAdaptiveStepsize_),
+                useProjectedAdaptiveStepsize_(p.useProjectedAdaptiveStepsize_){
+
+            }
+        };
 
       using  DualDecompositionBase<GmType, DualBlockType >::gm_;
       using  DualDecompositionBase<GmType, DualBlockType >::subGm_;
diff --git a/include/opengm/inference/dynamicprogramming.hxx b/include/opengm/inference/dynamicprogramming.hxx
index ec402cb..436c36e 100644
--- a/include/opengm/inference/dynamicprogramming.hxx
+++ b/include/opengm/inference/dynamicprogramming.hxx
@@ -24,7 +24,27 @@ namespace opengm {
     typedef visitors::VerboseVisitor<DynamicProgramming<GM, ACC> > VerboseVisitorType;
     typedef visitors::EmptyVisitor<DynamicProgramming<GM, ACC> >   EmptyVisitorType;
     typedef visitors::TimingVisitor<DynamicProgramming<GM, ACC> >  TimingVisitorType;
+
+
+    template<class _GM>
+    struct RebindGm{
+        typedef DynamicProgramming<_GM, ACC> type;
+    };
+
+    template<class _GM,class _ACC>
+    struct RebindGmAndAcc{
+        typedef DynamicProgramming<_GM, _ACC > type;
+    };
+
     struct Parameter {
+        Parameter(){
+
+        }
+        template<class P>
+        Parameter(const P &p)
+        : roots_(p.roots_){
+        }
+        
       std::vector<IndexType> roots_;
     };
 
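RebindGmAndAcc covers the case where the accumulator changes together with the model, e.g. rebinding a minimizing solver to a maximizing one (a sketch; Gm is a hypothetical concrete model type):

    typedef DynamicProgramming<Gm, opengm::Minimizer> DpMin;
    typedef DpMin::RebindGmAndAcc<Gm, opengm::Maximizer>::type DpMax;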
diff --git a/include/opengm/inference/external/qpbo.hxx b/include/opengm/inference/external/qpbo.hxx
index ea0b732..91bb777 100644
--- a/include/opengm/inference/external/qpbo.hxx
+++ b/include/opengm/inference/external/qpbo.hxx
@@ -35,8 +35,20 @@ namespace opengm {
             TB0, TB1, TBX
          };
 
-         ///Parameter for opengm::external::QPBO
-         struct Parameter {
+        template<class _GM>
+        struct RebindGm{
+            typedef QPBO<_GM> type;
+        };
+
+        template<class _GM,class _ACC>
+        struct RebindGmAndAcc{
+            typedef QPBO<_GM> type;
+        };
+
+
+
+        ///Parameter for opengm::external::QPBO
+        struct Parameter {
             /// using probeing technique
             bool useProbeing_;
             /// forcing strong persistency
@@ -47,12 +59,23 @@ namespace opengm {
             std::vector<size_t> label_;
             /// \brief constructor
 
+            template<class P>
+            Parameter(const P & p)
+            :
+                strongPersistency_(p.strongPersistency_),
+                useImproveing_ (p.useImproveing_),
+                useProbeing_ (p.useProbeing_)
+            {
+
+            }
+
+
             Parameter() {
-               strongPersistency_ = true;
-               useImproveing_ = false;
-               useProbeing_ = false;
+                strongPersistency_ = true;
+                useImproveing_ = false;
+                useProbeing_ = false;
             }
-         };
+        };
          // construction
          QPBO(const GraphicalModelType& gm, const Parameter para = Parameter());
          ~QPBO();
diff --git a/include/opengm/inference/icm.hxx b/include/opengm/inference/icm.hxx
index 136e466..49d4cc2 100644
--- a/include/opengm/inference/icm.hxx
+++ b/include/opengm/inference/icm.hxx
@@ -36,33 +36,37 @@ public:
    typedef opengm::visitors::EmptyVisitor<ICM<GM,ACC> >  EmptyVisitorType;
    typedef opengm::visitors::TimingVisitor<ICM<GM,ACC> > TimingVisitorType;
 
-   class Parameter {
-   public:
-      Parameter(
-         const std::vector<LabelType>& startPoint
-      )
-      :  moveType_(SINGLE_VARIABLE),
-         startPoint_(startPoint) 
-         {}
+    template<class _GM>
+    struct RebindGm{
+        typedef ICM<_GM, ACC> type;
+    };
 
-      Parameter(
-         MoveType moveType, 
-         const std::vector<LabelType>& startPoint 
-      )
-      :  moveType_(moveType),
-         startPoint_(startPoint) 
-         {}
-      
-      Parameter(
-         MoveType moveType = SINGLE_VARIABLE
-      )
-      :  moveType_(moveType),
-         startPoint_() 
-      {}
-      
-      MoveType moveType_;
-      std::vector<LabelType>  startPoint_;
-   };
+    template<class _GM,class _ACC>
+    struct RebindGmAndAcc{
+        typedef ICM<_GM, _ACC> type;
+    };
+
+
+    class Parameter {
+    public:
+        Parameter(
+            MoveType moveType = SINGLE_VARIABLE
+        )
+        :   moveType_(moveType)
+        {
+        }
+
+        template<class OP>
+        Parameter(
+            const OP & otherParameter
+        ){
+            moveType_ = otherParameter.moveType_;
+        }
+
+        MoveType moveType_;
+    };
 
    ICM(const GraphicalModelType&);
    ICM(const GraphicalModelType&, const Parameter&);
@@ -114,27 +118,15 @@ ICM<GM, ACC>::ICM
    param_(parameter),
    currentMoveType_(SINGLE_VARIABLE)
 {
-   if(parameter.startPoint_.size() == gm.numberOfVariables()) {
-      movemaker_.initialize(parameter.startPoint_.begin() );
-   }
-   else if(parameter.startPoint_.size() != 0) {
-      throw RuntimeError("unsuitable starting point");
-   }
+
 }
       
 template<class GM, class ACC>
 inline void
 ICM<GM, ACC>::reset()
 {
-   if(param_.startPoint_.size() == gm_.numberOfVariables()) {
-      movemaker_.initialize(param_.startPoint_.begin() );
-   }
-   else if(param_.startPoint_.size() != 0) {
-      throw RuntimeError("unsuitable starting point");
-   }
-   else{
-      movemaker_.reset();
-   }
+
+    movemaker_.reset();
 }
    
 template<class GM, class ACC>
diff --git a/include/opengm/learning/dataset/dataset.hxx b/include/opengm/learning/dataset/dataset.hxx
index 3168c15..0ef892e 100644
--- a/include/opengm/learning/dataset/dataset.hxx
+++ b/include/opengm/learning/dataset/dataset.hxx
@@ -45,6 +45,9 @@ namespace opengm {
         friend class DatasetSerialization;
         // friend void loadAll<Dataset<GM,LOSS> > (const std::string datasetpath, const std::string prefix, Dataset<GM,LOSS>& ds);
 
+        //~Dataset(){
+        //    std::cout<<"KILL DATASET\n";
+        //}
      protected:	
          std::vector<size_t> count_;
          std::vector<bool> isCached_;
@@ -102,8 +105,11 @@ namespace opengm {
          OPENGM_ASSERT_OP(i, <, gmsWithLoss_.size());
          OPENGM_ASSERT_OP(i, <, gms_.size());
          OPENGM_ASSERT_OP(i, <, gts_.size());
+         //std::cout<<"copy gm\n";
          gmsWithLoss_[i] = gms_[i];    
+         //std::cout<<"copy done\n";
          LOSS loss(lossParams_[i]);         
+         OPENGM_CHECK_OP(gts_[i].size(),==, gmsWithLoss_[i].numberOfVariables(),"");
          loss.addLoss(gmsWithLoss_[i], gts_[i].begin());
       }
 
diff --git a/include/opengm/learning/dataset/editabledataset.hxx b/include/opengm/learning/dataset/editabledataset.hxx
index e608b3d..73290a0 100644
--- a/include/opengm/learning/dataset/editabledataset.hxx
+++ b/include/opengm/learning/dataset/editabledataset.hxx
@@ -68,7 +68,9 @@ namespace opengm {
         this->gms_[i] = gm;
         this->gts_[i] = gt;
         this->lossParams_[i] = p;
+        //std::cout<<"build model with loss\n";
         this->buildModelWithLoss(i);
+        //std::cout<<"build model with loss DONE\n";
     }
 
     template<class GM, class LOSS>
diff --git a/include/opengm/learning/loss/generalized-hammingloss.hxx b/include/opengm/learning/loss/generalized-hammingloss.hxx
index f7158a3..e19d945 100644
--- a/include/opengm/learning/loss/generalized-hammingloss.hxx
+++ b/include/opengm/learning/loss/generalized-hammingloss.hxx
@@ -22,8 +22,6 @@ public:
         double getNodeLossMultiplier(const size_t i) const;
         double getLabelLossMultiplier(const size_t i) const;
 
-        std::vector<double> nodeLossMultiplier_;
-        std::vector<double> labelLossMultiplier_;
 
         bool operator==(const GeneralizedHammingLoss & other) const{
                return nodeLossMultiplier_ == other.nodeLossMultiplier_ && labelLossMultiplier_ == other.labelLossMultiplier_;
@@ -44,6 +42,11 @@ public:
         void load(const hid_t& groupHandle);
         static std::size_t getLossId() { return lossId_; }
 
+
+        std::vector<double> nodeLossMultiplier_;
+        std::vector<double> labelLossMultiplier_;
+
+
     private:
         static const std::size_t lossId_ = 16001;
 
@@ -121,19 +124,26 @@ double GeneralizedHammingLoss::loss(const GM & gm, IT1 labelBegin, const IT1 lab
 template<class GM, class IT>
 void GeneralizedHammingLoss::addLoss(GM& gm, IT gt) const
 {
-
+    //std::cout<<"start to add loss\n";
     for(typename GM::IndexType i=0; i<gm.numberOfVariables(); ++i){
+        //std::cout<<"   vi"<<i<<"\n";
         typename GM::LabelType numL = gm.numberOfLabels(i);
-        opengm::ExplicitFunction<typename GM::ValueType,typename GM::IndexType, typename GM::LabelType> f(&numL, &(numL)+1, 0);
+        //std::cout<<"   vi numL"<<numL<<"\n";
+        opengm::ExplicitFunction<typename GM::ValueType,typename GM::IndexType, typename GM::LabelType> f(&numL, &numL+1, 0);
 
+        //std::cout<<"   apply multiplier\n";
         for(typename GM::LabelType l = 0; l < numL; ++l){
             f(l) = - param_.getNodeLossMultiplier(i) * param_.getLabelLossMultiplier(l);
         }
 
         f(*gt) = 0;
+        //std::cout<<"   increment\n";
         ++gt;
-        gm.addFactor(gm.addFunction(f), &i, &(i)+1);
+        //std::cout<<"   add\n";
+        gm.addFactor(gm.addFunction(f), &i, &i+1);
+        //std::cout<<"   next\n";
     }
+    //std::cout<<"end add loss\n";
 }
 
 } // namespace learning
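What addLoss contributes per variable is a unary that lowers the energy of every label disagreeing with the ground truth and leaves the ground-truth label at zero, i.e. the usual loss augmentation for max-margin learning. Illustrative values for a single variable (hypothetical multipliers):

    // numL = 3, ground truth gt = 1,
    // node multiplier 2.0, label multipliers {1.0, 1.0, 0.5}:
    //   f(0) = -2.0 * 1.0 = -2.0
    //   f(1) =  0.0               // the ground-truth label stays neutral
    //   f(2) = -2.0 * 0.5 = -1.0
    // Minimizing the augmented model therefore favors labelings that differ
    // from the ground truth, which is what loss-augmented inference needs.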
diff --git a/include/opengm/python/opengmpython.hxx b/include/opengm/python/opengmpython.hxx
index 00eea11..de21b7a 100644
--- a/include/opengm/python/opengmpython.hxx
+++ b/include/opengm/python/opengmpython.hxx
@@ -65,8 +65,6 @@ namespace python{
         typedef opengm::TruncatedAbsoluteDifferenceFunction   <ValueType,IndexType,LabelType> PyTruncatedAbsoluteDifferenceFunction;
         typedef opengm::TruncatedSquaredDifferenceFunction    <ValueType,IndexType,LabelType> PyTruncatedSquaredDifferenceFunction;
         typedef opengm::SparseFunction                        <ValueType,IndexType,LabelType> PySparseFunction; 
-        typedef PythonFunction                                <ValueType,IndexType,LabelType> PyPythonFunction; 
-        // learning functions
         typedef opengm::functions::learnable::LPotts          <ValueType,IndexType,LabelType> PyLPottsFunction;
         typedef opengm::functions::learnable::LUnary          <ValueType,IndexType,LabelType> PyLUnaryFunction;
 
@@ -79,7 +77,6 @@ namespace python{
             PyTruncatedAbsoluteDifferenceFunction,
             PyTruncatedSquaredDifferenceFunction,
             PySparseFunction,
-            PyPythonFunction,
             PyLPottsFunction,
             PyLUnaryFunction
         >::type type;
@@ -105,7 +102,6 @@ namespace python{
    typedef opengm::SquaredDifferenceFunction             <GmValueType,GmIndexType,GmLabelType> GmSquaredDifferenceFunction;
    typedef opengm::TruncatedSquaredDifferenceFunction    <GmValueType,GmIndexType,GmLabelType> GmTruncatedSquaredDifferenceFunction;
    typedef opengm::SparseFunction                        <GmValueType,GmIndexType,GmLabelType> GmSparseFunction; 
-   typedef opengm::python::PythonFunction                <GmValueType,GmIndexType,GmLabelType> GmPythonFunction; 
    typedef opengm::functions::learnable::LPotts          <GmValueType,GmIndexType,GmLabelType> PyLPottsFunction;
    typedef opengm::functions::learnable::LUnary          <GmValueType,GmIndexType,GmLabelType> PyLUnaryFunction;
    
diff --git a/src/interfaces/python/opengm/functionhelper.py b/src/interfaces/python/opengm/functionhelper.py
index cf424cb..c8833e8 100644
--- a/src/interfaces/python/opengm/functionhelper.py
+++ b/src/interfaces/python/opengm/functionhelper.py
@@ -4,12 +4,12 @@ from opengmcore._opengmcore import (SparseFunction,
                                     TruncatedAbsoluteDifferenceFunction,
                                     TruncatedSquaredDifferenceFunction,
                                     PottsFunction, PottsNFunction,
-                                    PottsGFunction , PythonFunction,
+                                    PottsGFunction ,
                                     SparseFunctionVector, 
                                     TruncatedAbsoluteDifferenceFunctionVector,
                                     TruncatedSquaredDifferenceFunctionVector,
                                     PottsFunctionVector, PottsNFunctionVector,
-                                    PottsGFunctionVector , PythonFunctionVector,
+                                    PottsGFunctionVector ,
                                     
                                     )
 
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index 5a1c95d..40994db 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -1,9 +1,9 @@
 from _learning import *
+from _learning import _lunarySharedFeatFunctionsGen,_lpottsFunctionsGen
 import numpy
 import struct
 from opengm import index_type,value_type, label_type
 from opengm import configuration as opengmConfig, LUnaryFunction
-
 DatasetWithHammingLoss.lossType = 'hamming'
 DatasetWithGeneralizedHammingLoss.lossType = 'generalized-hamming'
 
@@ -129,32 +129,7 @@ def maxLikelihoodLearner(dataset):
     return learner
 
 
-def lPottsFunctions(nFunctions, numberOfLabels, features, weightIds):
-
-    # check that features has the correct shape
-    if features.ndim != 2:
-        raise RuntimeError("feature must be two-dimensional")
-    if features.shape[0] != nFunctions :
-        raise RuntimeError("nFunctions.shape[0] must be equal to nFunctions")
-
-
-    # check that weights has the correct shape
-    if features.ndim != 1:
-        raise RuntimeError("weightIds must be one-dimensional")
-    if weightIds.shape[0] != features.shape[1] :
-        raise RuntimeError("weightIds.shape[0]  must be equal to features.shape[1]")
-
-
-    # require the correct types
-    features = numpy.require(features, dtype=value_type)
-    weightIds = numpy.require(weightIds, dtype=index_type)
-    numberOfLabels = int(numberOfLabels)
-    nFunctions = int(nFunctions)
-
-    # do the c++ call here
-    # which generates a function generator
 
-    raise RuntimeError("not yet implemented")
 
 
 def lUnaryFunction(weights, numberOfLabels, features, weightIds):
@@ -179,9 +154,72 @@ def lUnaryFunction(weights, numberOfLabels, features, weightIds):
                           features=features, weightIds=weightIds)
 
 
-def lUnaryFunctions(nFunctions, numberOfLabels, features, weightIds):
-    raise RuntimeError("not yet implemented")
 
 
+class FeaturePolicy(object):
+    sharedBetweenLabels = 0
+
+def lUnaryFunctions(weights,numberOfLabels, features, weightIds,
+                    featurePolicy = FeaturePolicy.sharedBetweenLabels, 
+                    **kwargs):
+
+    if (featurePolicy == FeaturePolicy.sharedBetweenLabels ):
+
+        makeFirstEntryConst = kwargs.get('makeFirstEntryConst',False)
+        addConstFeature = kwargs.get('addConstFeature',False)
+
+
+        ff = numpy.require(features, dtype=value_type)
+        wid = numpy.require(weightIds, dtype=index_type)
+
+        assert features.ndim == 2
+        assert weightIds.ndim == 2
+
+
+        res = _lunarySharedFeatFunctionsGen(
+            weights = weights,
+            numFunctions = int(ff.shape[0]),
+            numLabels = int(numberOfLabels),
+            features = ff,
+            weightIds = wid,
+            makeFirstEntryConst = bool(makeFirstEntryConst),
+            addConstFeature = bool(addConstFeature)
+        )
+
+        res.__dict__['_features_'] = ff
+        res.__dict__['_weights_'] = wid
+        return res
+    else:
+        raise RuntimeError("unknown featurePolicy")
+
+def lPottsFunctions(weights, numberOfLabels, features, weightIds,
+                    addConstFeature = False):
+
+    # check that features has the correct shape
+    if features.ndim != 2:
+        raise RuntimeError("feature must be two-dimensional")
+
+    # check that weights has the correct shape
+    if weightIds.ndim != 1:
+        raise RuntimeError("weightIds must be one-dimensional")
+    if weightIds.shape[0] != features.shape[1] + int(addConstFeature) :
+        raise RuntimeError("weightIds.shape[0]  must be equal to features.shape[1]")
+
+
+
+    # do the c++ call here
+    # which generates a function generator
 
 
+    ff = numpy.require(features, dtype=value_type)
+    wid = numpy.require(weightIds, dtype=index_type)
+    res =  _lpottsFunctionsGen(
+        weights=weights,
+        numFunctions=long(features.shape[0]),
+        numLabels=long(numberOfLabels),
+        features=ff,
+        weightIds=wid,
+        addConstFeature=bool(addConstFeature)
+    )
+
+    res.__dict__['_features_'] = ff
+    res.__dict__['_weights_'] = wid
+    return res
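On shapes: both wrappers hand NumPy arrays straight to the C++ generators, which re-check them (see pyLFunctionGen.cxx below). For the shared-feature unaries the contract is features of shape (nFunctions, nFeat) and weightIds of shape (numLabels - int(makeFirstEntryConst), nFeat + int(addConstFeature)); for the Potts functions weightIds is one-dimensional with nFeat + int(addConstFeature) entries. A sketch with made-up sizes, in C++ terms:

    // 4 labels, 3 raw features, constant first entry, appended const feature:
    const size_t nFeat = 3, numLabels = 4;
    const size_t wShape[2] = {numLabels - 1, nFeat + 1};   // 3 x 4
    marray::Marray<size_t> weightIds(wShape, wShape + 2);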
diff --git a/src/interfaces/python/opengm/learning/learning.cxx b/src/interfaces/python/opengm/learning/learning.cxx
index f0e4dda..d7333ee 100644
--- a/src/interfaces/python/opengm/learning/learning.cxx
+++ b/src/interfaces/python/opengm/learning/learning.cxx
@@ -9,6 +9,7 @@
 #include <opengm/learning/loss/hammingloss.hxx>
 #include <opengm/learning/loss/generalized-hammingloss.hxx>
 #include <opengm/learning/loss/noloss.hxx>
+//#include <opengm/learning/loss/flexibleloss.hxx>
 
 #if defined(WITH_CPLEX) || defined(WITH_GUROBI)
 #include <opengm/learning/bundle-optimizer.hxx>
@@ -62,12 +63,15 @@ BOOST_PYTHON_MODULE_INIT(_learning) {
 
     // templated datasets
     opengm::export_dataset<op::GmAdder, ol::HammingLoss >("DatasetWithHammingLoss");
-    //opengm::export_dataset<op::GmAdder, ol::NoLoss >("DatasetWithNoLoss");
     opengm::export_dataset<op::GmAdder, ol::GeneralizedHammingLoss >("DatasetWithGeneralizedHammingLoss");
+    //opengm::export_dataset<op::GmAdder, ol::FlexibleLoss >("DatasetWithFlexibleLoss");
+
 
 
     opengm::export_grid_search_learner<op::GmAdderHammingLossDataset>("GridSearch_HammingLoss");
     opengm::export_grid_search_learner<op::GmAdderGeneralizedHammingLossDataset>("GridSearch_GeneralizedHammingLoss");
+    opengm::export_grid_search_learner<op::GmAdderGeneralizedHammingLossDataset>("GridSearch_FlexibleLoss");
+    
 
     opengm::export_max_likelihood_learner<op::GmAdderHammingLossDataset>("MaxLikelihood_HammingLoss");
     opengm::export_max_likelihood_learner<op::GmAdderGeneralizedHammingLossDataset>("MaxLikelihood_GeneralizedHammingLoss");
diff --git a/src/interfaces/python/opengm/learning/pyDataset.cxx b/src/interfaces/python/opengm/learning/pyDataset.cxx
index a89f263..5eae4a4 100644
--- a/src/interfaces/python/opengm/learning/pyDataset.cxx
+++ b/src/interfaces/python/opengm/learning/pyDataset.cxx
@@ -22,9 +22,9 @@ namespace opengm{
 template<class GM, class LOSS>
 void pySetInstanceWithLossParam(opengm::datasets::EditableDataset<GM, LOSS>& ds,
                    const size_t i,
-                   GM& gm,
-                   const opengm::python::NumpyView<typename GM::LabelType,1>& gt,
-                   typename LOSS::Parameter param) {
+                   const GM& gm,
+                   const opengm::python::NumpyView<typename GM::LabelType,1>  gt,
+                   const typename LOSS::Parameter & param) {
     std::vector<typename GM::LabelType> gt_vector(gt.begin(), gt.end());
     ds.setInstance(i, gm, gt_vector, param);
 }
@@ -32,7 +32,7 @@ void pySetInstanceWithLossParam(opengm::datasets::EditableDataset<GM, LOSS>& ds,
 template<class GM, class LOSS>
 void pySetInstance(opengm::datasets::EditableDataset<GM, LOSS>& ds,
                    const size_t i,
-                   GM& gm,
+                   const GM& gm,
                    const opengm::python::NumpyView<typename GM::LabelType,1>& gt
                    ) {
     pySetInstanceWithLossParam(ds, i, gm, gt, typename LOSS::Parameter());
@@ -40,16 +40,16 @@ void pySetInstance(opengm::datasets::EditableDataset<GM, LOSS>& ds,
 
 template<class GM, class LOSS>
 void pyPushBackInstanceWithLossParam(opengm::datasets::EditableDataset<GM,LOSS>& ds,
-                        GM& gm,
+                        const GM& gm,
                         const opengm::python::NumpyView<typename GM::LabelType,1>& gt,
-                        typename LOSS::Parameter param) {
+                        const typename LOSS::Parameter & param) {
     std::vector<typename GM::LabelType> gt_vector(gt.begin(), gt.end());
     ds.pushBackInstance(gm, gt_vector, param);
 }
 
 template<class GM, class LOSS>
 void pyPushBackInstance(opengm::datasets::EditableDataset<GM,LOSS>& ds,
-                        GM& gm,
+                        const GM& gm,
                         const opengm::python::NumpyView<typename GM::LabelType,1>& gt
                         ) {
     pyPushBackInstanceWithLossParam(ds, gm, gt, typename LOSS::Parameter());
@@ -73,8 +73,7 @@ template<class GM, class LOSS>
 void export_dataset(const std::string& className){
     typedef opengm::datasets::EditableDataset<GM,LOSS > PyDataset;
 
-   class_<PyDataset > (className.c_str(), boost::python::no_init)
-           .def(init<size_t>())
+   class_<PyDataset > (className.c_str(),init<size_t>())
            .def("lockModel", &PyDataset::lockModel)
            .def("unlockModel", &PyDataset::unlockModel)
            .def("getModel", &PyDataset::getModel, return_internal_reference<>())
diff --git a/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx b/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx
index 79db09f..2028251 100644
--- a/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx
+++ b/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx
@@ -110,6 +110,7 @@ namespace opengm{
             const size_t numLabels,
             op::NumpyView<ValueType, 2> features,
             op::NumpyView<IndexType, 2> weightIds,
+            const bool makeFirstEntryConst,
             const bool addConstFeature
         ):
         FunctionGeneratorBase<GM_ADDER,GM_MULT>(),
@@ -117,11 +118,13 @@ namespace opengm{
         numFunctions_(numFunctions),
         numLabels_(numLabels),
         features_(features),
-        //weightIds_(weightIds.begin(), weightIds.end()),
+        weightIds_(weightIds),
+        makeFirstEntryConst_(makeFirstEntryConst),
         addConstFeature_(addConstFeature)
         {
-            //OPENGM_CHECK_OP(features.shape(0), == , numFunctions, "wrong shape");
-            //OPENGM_CHECK_OP(features.shape(1)+int(addConstFeature), == , weightIds.shape(0), "wrong shape");
+            OPENGM_CHECK_OP(features.shape(0), == , numFunctions, "wrong shape");
+            OPENGM_CHECK_OP(weightIds.shape(1), == , features.shape(1) + int(addConstFeature), "wrong shape");
+            OPENGM_CHECK_OP(weightIds.shape(0)+int(makeFirstEntryConst), == ,numLabels, "wrong shape");
         }
  
 
@@ -131,17 +134,32 @@ namespace opengm{
             typedef typename GM::FunctionIdentifier Fid;
             typedef std::vector<Fid> FidVector;
             FidVector * fidVector = new FidVector(numFunctions_);
+
+
             const size_t nFeat =features_.shape(1);
-            std::vector<ValueType> fFeat(nFeat+int(addConstFeature_));
+            const size_t nWPerL = nFeat+int(addConstFeature_);
+            marray::Marray<ValueType> fFeat(&nWPerL,&nWPerL+1);
+
+
+            // copy the weights once!
+            const size_t wShape[2] = {numLabels_ - int(makeFirstEntryConst_), nWPerL};
+            marray::Marray<size_t> _weightIds(wShape, wShape+2);
+
+            for(size_t ll=0; ll<wShape[0]; ++ll){
+                for(size_t wi=0; wi<wShape[1]; ++wi){
+                    _weightIds(ll, wi) = weightIds_(ll, wi);
+                }
+            }
+
+
             for(size_t  i=0;i<numFunctions_;++i){
+                // copy the features for that instance
                 for(size_t f=0; f<nFeat; ++f){
-                    fFeat[f] = features_(i,f);
+                    fFeat(f) = features_(i,f);
                 }
                 if(addConstFeature_){
-                    fFeat[nFeat] = 1.0;
+                    fFeat(nFeat) = 1.0;
                 }
-            //    const FType f(weights_, numLabels_, weightIds_, fFeat);
-            //    (*fidVector)[i] = gm.addFunction(f);
+                // build the learnable unary and store its id in the result vector
+                const FType f(weights_, numLabels_, _weightIds, fFeat, makeFirstEntryConst_);
+                (*fidVector)[i] = gm.addFunction(f);
             }   
             return fidVector;
         }
@@ -158,6 +176,8 @@ namespace opengm{
         size_t numFunctions_;
         size_t numLabels_;
         op::NumpyView<ValueType, 2>  features_;
+        op::NumpyView<IndexType, 2>  weightIds_;
+        bool makeFirstEntryConst_;
         bool addConstFeature_;
     };
 
@@ -169,10 +189,13 @@ namespace opengm{
         const size_t numLabels,
         opengm::python::NumpyView<typename GM_ADDER::ValueType,2> features,
         opengm::python::NumpyView<typename GM_ADDER::IndexType,2> weightIds,
+        const bool makeFirstEntryConst,
         const bool addConstFeature
     ){
         FunctionGeneratorBase<GM_ADDER,GM_MULT> * ptr = 
-            new LUnarySharedFeatFunctionGen<GM_ADDER,GM_MULT>(weights,numFunctions,numLabels,features,weightIds, addConstFeature);
+            new LUnarySharedFeatFunctionGen<GM_ADDER,GM_MULT>(weights,numFunctions,numLabels,
+                                                              features,weightIds,makeFirstEntryConst,
+                                                              addConstFeature);
         return ptr;
     }
 
@@ -214,8 +237,7 @@ namespace opengm{
                 bp::arg("features"),
                 bp::arg("weightIds"),
                 bp::arg("addConstFeature")
-            ),
-            "factory function to generate a lpotts function generator object which can be passed to ``gm.addFunctions(functionGenerator)``"
+            )
         );
 
          bp::def("_lunarySharedFeatFunctionsGen",&lunarySharedFeatFunctionGen<GM_ADDER,GM_MULT>,bp::return_value_policy<bp::manage_new_object>(),
@@ -225,9 +247,9 @@ namespace opengm{
                 bp::arg("numLabels"),
                 bp::arg("features"),
                 bp::arg("weightIds"),
+                bp::arg("makeFirstEntryConst"),
                 bp::arg("addConstFeature")
-            ),
-            "factory function to generate a lunary function generator object which can be passed to ``gm.addFunctions(functionGenerator)``"
+            )
         );
 
     }
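
The constructor checks added above pin down the shape contract of the
shared-feature generator: features must be (numFunctions, nFeat) and weightIds
must be (numLabels - makeFirstEntryConst, nFeat + addConstFeature). A sketch of
a call through the raw binding, assuming learning.Weights is the exported
weight vector and that the _learning extension's names are re-exported by the
opengm.learning package:

    import numpy
    import opengm
    from opengm import learning

    nFun, nLab, nFeat = 100, 4, 3
    weights   = learning.Weights((nLab - 1) * (nFeat + 1))  # assumed export
    features  = numpy.random.rand(nFun, nFeat).astype(opengm.value_type)
    # one row of weight ids per non-constant label; one column per feature,
    # plus one column for the constant feature appended by addConstFeature:
    weightIds = numpy.arange((nLab - 1) * (nFeat + 1),
                             dtype=opengm.index_type).reshape(nLab - 1, nFeat + 1)

    gen = learning._lunarySharedFeatFunctionsGen(
        weights=weights, numFunctions=nFun, numLabels=nLab,
        features=features, weightIds=weightIds,
        makeFirstEntryConst=True, addConstFeature=True)
    # fids = gm.addFunctions(gen)  # consumer named in the docstring removed above
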
diff --git a/src/interfaces/python/opengm/opengmcore/__init__.py b/src/interfaces/python/opengm/opengmcore/__init__.py
index 09bc5f1..f7a8ef8 100644
--- a/src/interfaces/python/opengm/opengmcore/__init__.py
+++ b/src/interfaces/python/opengm/opengmcore/__init__.py
@@ -192,14 +192,6 @@ class Multiplier:
       return float(1.0)
 
  
-def modelViewFunction(factor):
-  class _ModelViewFunction:
-    def __init__(self,factor):
-      self.factor=factor
-    def __call__(self,labeling):
-      return self.factor[labeling]
-  return PythonFunction( _ModelViewFunction(factor) ,factor.shape.__tuple__())
-
 #Model generators
 def grid2d2Order(unaries,regularizer,order='numpy',operator='adder'):
    """ 
@@ -349,7 +341,6 @@ _TruncatedSquaredDifferenceFunction  = TruncatedSquaredDifferenceFunction
 _PottsFunction                       = PottsFunction
 _PottsNFunction                      = PottsNFunction
 _PottsGFunction                      = PottsGFunction
-_PythonFunction                      = PythonFunction
 _FactorSubset                        = FactorSubset
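
modelViewFunction had to go because it wrapped a factor in the PythonFunction
type this commit removes. A rough stand-in is to tabulate the factor into a
dense numpy array, which gm.addFunction accepts directly; this assumes
opengm.shapeWalker is still available in opengmcore and only suits factors
small enough to enumerate:

    import numpy
    import opengm

    def modelViewTable(factor):
        # Enumerate all labelings and copy the factor's values into a dense
        # table; factor[labeling] is the same accessor the removed helper used.
        shape = factor.shape.__tuple__()
        table = numpy.empty(shape, dtype=opengm.value_type)
        for labeling in opengm.shapeWalker(factor.shape):  # assumed helper
            table[tuple(labeling)] = factor[labeling]
        return table  # pass straight to gm.addFunction
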
 
 
diff --git a/src/interfaces/python/opengm/opengmcore/function_injector.py b/src/interfaces/python/opengm/opengmcore/function_injector.py
index a822ee8..96d783d 100644
--- a/src/interfaces/python/opengm/opengmcore/function_injector.py
+++ b/src/interfaces/python/opengm/opengmcore/function_injector.py
@@ -1,11 +1,11 @@
 from _opengmcore import ExplicitFunction,SparseFunction, \
                         TruncatedAbsoluteDifferenceFunction, \
                         TruncatedSquaredDifferenceFunction,PottsFunction,PottsNFunction, \
-                        PottsGFunction,PythonFunction,\
+                        PottsGFunction,\
                         ExplicitFunctionVector,SparseFunctionVector, \
                         TruncatedAbsoluteDifferenceFunctionVector, \
                         TruncatedSquaredDifferenceFunctionVector,PottsFunctionVector,PottsNFunctionVector, \
-                        PottsGFunctionVector,PythonFunctionVector
+                        PottsGFunctionVector
 import numpy
 
 
@@ -25,8 +25,8 @@ def _extend_function_vector_classes():
     function_vector_classes=[   ExplicitFunctionVector,SparseFunctionVector,
                                 TruncatedAbsoluteDifferenceFunctionVector,
                                 TruncatedSquaredDifferenceFunctionVector,PottsFunctionVector,
-                                PottsNFunctionVector,PottsGFunctionVector,
-                                PythonFunctionVector ]  
+                                PottsNFunctionVector,PottsGFunctionVector
+                                 ]  
 
     for function_vector in function_vector_classes:
         class InjectorGenericFunctionVector(object):
@@ -50,8 +50,7 @@ def _extend_function_type_classes():
   function_classes=[ExplicitFunction,SparseFunction,
                     TruncatedAbsoluteDifferenceFunction,
                     TruncatedSquaredDifferenceFunction,PottsFunction,
-                    PottsNFunction,PottsGFunction,
-                    PythonFunction]
+                    PottsNFunction,PottsGFunction]
 
 
 
diff --git a/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx b/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
index 70827a0..1cee266 100644
--- a/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
+++ b/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
@@ -216,10 +216,10 @@ namespace pyfunction{
 
         const size_t weightShape0 =  weightIds.shape(0);
         for(size_t l=0; l<weightShape0; ++l){
-            fiVec[l].indices.resize(fPerL);
+            fiVec[l].weightIds.resize(fPerL);
             fiVec[l].features.resize(fPerL);
             for(size_t i=0; i<fPerL; ++i){
-                fiVec[l].indices[i] = weightIds(l, i);
+                fiVec[l].weightIds[i] = weightIds(l, i);
                 fiVec[l].features[i] = features(l, i);
             }
         }
@@ -268,11 +268,11 @@ namespace pyfunction{
                 "for one label the number of features and the number of weights must be the same");
 
             const size_t fPerL = wId.shape(0);
-            fiVec[l].indices.resize(fPerL);
+            fiVec[l].weightIds.resize(fPerL);
             fiVec[l].features.resize(fPerL);
 
             for(size_t i=0; i<fPerL; ++i){
-                fiVec[l].indices[i] = wId(i);
+                fiVec[l].weightIds[i] = wId(i);
                 fiVec[l].features[i] = fs(i);
             }
         }
@@ -475,7 +475,6 @@ void export_functiontypes(){
    typedef opengm::SquaredDifferenceFunction             <ValueType,IndexType,LabelType> PySquaredDifferenceFunction;
    typedef opengm::TruncatedSquaredDifferenceFunction    <ValueType,IndexType,LabelType> PyTruncatedSquaredDifferenceFunction;
    typedef opengm::SparseFunction                        <ValueType,IndexType,LabelType> PySparseFunction; 
-   typedef opengm::python::PythonFunction                <ValueType,IndexType,LabelType> PyPythonFunction; 
    typedef opengm::functions::learnable::LPotts          <ValueType,IndexType,LabelType> PyLPottsFunction;
    typedef opengm::functions::learnable::LUnary          <ValueType,IndexType,LabelType> PyLUnaryFunction;
    typedef opengm::functions::learnable::LSumOfExperts   <ValueType,IndexType,LabelType> PyLSumOfExpertsFunction;
@@ -503,7 +502,6 @@ void export_functiontypes(){
    //export_function_type_vector<PySquaredDifferenceFunction>("SquaredDifferenceFunctionVector");
    export_function_type_vector<PyTruncatedSquaredDifferenceFunction>("TruncatedSquaredDifferenceFunctionVector");
    export_function_type_vector<PySparseFunction>("SparseFunctionVector");
-   export_function_type_vector<PyPythonFunction>("PythonFunctionVector");
 
    typedef typename PySparseFunction::ContainerType PySparseFunctionMapType;
    //export std::map for sparsefunction
@@ -726,22 +724,6 @@ void export_functiontypes(){
    )
    ;
    
-   FUNCTION_TYPE_EXPORTER_HELPER(PyPythonFunction,                       "PythonFunction")
-   .def(init<boost::python::object,boost::python::object,const bool>(
-         (arg("function"),arg("shape"),arg("ensureGilState")=true),
-         "Examples: ::\n\n"
-         "   >>> import opengm\n"
-         "   >>> import numpy\n" 
-         "   >>> def labelSumFunction(labels):\n"
-         "   ...    s=0\n"
-         "   ...    for l in labels:\n"
-         "   ...       s+=l\n"
-         "   ...    return s\n"
-         "   >>> f=opengm.PythonFunction(function=labelSumFunction,shape=[2,2])\n"
-         "\n\n"
-      )
-   )
-   ;
 
 
    FUNCTION_TYPE_EXPORTER_HELPER(PyLPottsFunction,"LPottsFunction")
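
The indices -> weightIds rename above mirrors the FeaturesAndIndices change in
lunary.hxx: for every label there is one row of weight ids and one matching row
of feature values, and the converters above require equal lengths per label. A
construction sketch for the exported LUnaryFunction; the keyword spellings are
assumptions read off the converter code paths shown:

    import numpy
    import opengm
    from opengm import learning

    weights = learning.Weights(6)            # assumed export name
    # row l carries the weight ids / features for label l
    weightIds = numpy.array([[0, 1, 2],
                             [3, 4, 5]], dtype=opengm.index_type)
    features  = numpy.array([[0.5, 1.0, 2.0],
                             [1.5, 0.0, 1.0]], dtype=opengm.value_type)
    f = opengm.LUnaryFunction(weights=weights, numberOfLabels=2,
                              weightIds=weightIds, features=features)
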
diff --git a/src/interfaces/python/opengm/opengmcore/pyGm.cxx b/src/interfaces/python/opengm/opengmcore/pyGm.cxx
index e6102cc..1b2c6cf 100644
--- a/src/interfaces/python/opengm/opengmcore/pyGm.cxx
+++ b/src/interfaces/python/opengm/opengmcore/pyGm.cxx
@@ -682,7 +682,6 @@ namespace pygm {
          typedef opengm::SquaredDifferenceFunction             <ValueType,IndexType,LabelType> PySquaredDifferenceFunction;
          typedef opengm::TruncatedSquaredDifferenceFunction    <ValueType,IndexType,LabelType> PyTruncatedSquaredDifferenceFunction;
          typedef opengm::SparseFunction                        <ValueType,IndexType,LabelType> PySparseFunction; 
-         typedef opengm::python::PythonFunction                <ValueType,IndexType,LabelType> PyPythonFunction; 
          typedef opengm::functions::learnable::LPotts          <ValueType,IndexType,LabelType> PyLPottsFunction;
          typedef opengm::functions::learnable::LUnary          <ValueType,IndexType,LabelType> PyLUnaryFunction;
 
@@ -707,9 +706,6 @@ namespace pygm {
          else if(fname==std::string("sparse")){
             return gm. template  reserveFunctions<PySparseFunction>(size);
          }
-         else if(fname==std::string("python")){
-            return gm. template  reserveFunctions<PyPythonFunction>(size);
-         }
          else if(fname==std::string("lpotts")){
             return gm. template  reserveFunctions<PyLPottsFunction>(size);
          }
@@ -1465,7 +1461,6 @@ void export_gm() {
    typedef opengm::SquaredDifferenceFunction             <ValueType,IndexType,LabelType> PySquaredDifferenceFunction;
    typedef opengm::TruncatedSquaredDifferenceFunction    <ValueType,IndexType,LabelType> PyTruncatedSquaredDifferenceFunction;
    typedef opengm::SparseFunction                        <ValueType,IndexType,LabelType> PySparseFunction; 
-   typedef opengm::python::PythonFunction                <ValueType,IndexType,LabelType> PyPythonFunction; 
    typedef opengm::functions::learnable::LPotts          <ValueType,IndexType,LabelType> PyLPottsFunction;
    typedef opengm::functions::learnable::LUnary          <ValueType,IndexType,LabelType> PyLUnaryFunction;
 
@@ -1845,7 +1840,6 @@ void export_gm() {
    //.def("_addFunctions_vector",&pygm::addFunctionsGenericVectorPy<PyGm,PySquaredDifferenceFunction>,return_value_policy<manage_new_object>(),args("functions"),"todo")
    .def("_addFunctions_vector",&pygm::addFunctionsGenericVectorPy<PyGm,PyTruncatedSquaredDifferenceFunction>,return_value_policy<manage_new_object>(),args("functions"),"todo")
    .def("_addFunctions_vector",&pygm::addFunctionsGenericVectorPy<PyGm,PySparseFunction>,return_value_policy<manage_new_object>(),args("functions"),"todo")
-   .def("_addFunctions_vector",&pygm::addFunctionsGenericVectorPy<PyGm,PyPythonFunction>,return_value_policy<manage_new_object>(),args("functions"),"todo")
 
    .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PyLUnaryFunction>,args("function"))
    .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PyLPottsFunction>,args("function"))
@@ -1857,8 +1851,6 @@ void export_gm() {
    //.def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PySquaredDifferenceFunction>,args("function"))
    .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PyTruncatedSquaredDifferenceFunction>,args("function"))
    .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PySparseFunction>,args("function"))
-   .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PyPythonFunction>,args("function"))
-   .def("_addFunction",&pygm::addFunctionGenericPy<PyGm,PyPythonFunction>,args("function"))
    .def("_addFunction", &pygm::addFunctionNpPy<PyGm>,args("function"))
    .def("_addFactor", &pygm::addFactor_Any<PyGm,int>, (arg("fid"),arg("variableIndices"),arg("finalize")))
    .def("_addFactor", &pygm::addFactor_Numpy<PyGm>, (arg("fid"),arg("variableIndices"),arg("finalize")))

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


