[opengm] 329/386: removed lfunctions

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:23 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 53f3284f7d5264c695881b3bb6475e9ec4d42b9d
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Fri Mar 25 12:42:27 2016 +0100

    removed lfunctions
---
 include/opengm/functions/learnable/lpotts.hxx      | 254 -----------
 include/opengm/functions/learnable/lunary.hxx      | 478 ---------------------
 .../learnable/lweightedsum_of_functions.hxx        | 264 ------------
 include/opengm/python/opengmpython.hxx             |  14 +-
 .../python/opengm/opengmcore/pyFunctionTypes.cxx   | 164 +------
 5 files changed, 9 insertions(+), 1165 deletions(-)

diff --git a/include/opengm/functions/learnable/lpotts.hxx b/include/opengm/functions/learnable/lpotts.hxx
deleted file mode 100644
index a9938c6..0000000
--- a/include/opengm/functions/learnable/lpotts.hxx
+++ /dev/null
@@ -1,254 +0,0 @@
-#pragma once
-#ifndef OPENGM_LEARNABLE_POTTS_FUNCTION_HXX
-#define OPENGM_LEARNABLE_POTTS_FUNCTION_HXX
-
-#include <algorithm>
-#include <vector>
-#include <cmath>
-
-#include "opengm/opengm.hxx"
-#include "opengm/functions/function_registration.hxx"
-#include "opengm/functions/function_properties_base.hxx"
-#include "opengm/graphicalmodel/weights.hxx"
-
-namespace opengm {
-namespace functions {
-namespace learnable {
-
-/// Learnable feature function for two variables
-///
-/// f(u,v) = (\sum_i w_i * feat_i) I(u!=v)
-///  - w    = parameter vector
-///  - feat = feature vector
-///
-/// derive from this class and implement the function
-///   parameterGradient(i,x)= A(x)_{i,*}*feat
-///  
-/// \ingroup functions
-template<class T, class I = size_t, class L = size_t>
-class LPotts
-   : public opengm::FunctionBase<opengm::functions::learnable::LPotts<T, I, L>, T, I, L>
-{
-public:
-   typedef T ValueType;
-   typedef L LabelType;
-   typedef I IndexType;
- 
-   LPotts();
-   LPotts(const opengm::learning::Weights<T>& weights,
-      const L numLabels,
-      const std::vector<size_t>& weightIDs,
-      const std::vector<T>& feat
-      );
-   LPotts(const L numLabels,
-      const std::vector<size_t>& weightIDs,
-      const std::vector<T>& feat
-      );
-   L shape(const size_t) const;
-   size_t size() const;
-   size_t dimension() const;
-   template<class ITERATOR> T operator()(ITERATOR) const;
- 
-   // parameters
-   void setWeights(const opengm::learning::Weights<T>& weights) const
-      {weights_ = &weights;}
-   size_t numberOfWeights()const
-     {return weightIDs_.size();}
-   I weightIndex(const size_t weightNumber) const
-     {return weightIDs_[weightNumber];} //dummy
-   template<class ITERATOR> 
-   T weightGradient(size_t,ITERATOR) const;
-
-   bool isPotts() const {return true;}
-   bool isGeneralizedPotts() const {return true;}
-
-protected:
-   mutable const opengm::learning::Weights<T> * weights_;
-   L numLabels_;
-   std::vector<size_t> weightIDs_;
-   std::vector<T> feat_;
-
-
-    friend class opengm::FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >;
-};
-
-
-template <class T, class I, class L>
-inline
-LPotts<T, I, L>::LPotts
-( 
-   const opengm::learning::Weights<T>& weights,
-   const L numLabels,
-   const std::vector<size_t>& weightIDs,
-   const std::vector<T>& feat
-   )
-   :  weights_(&weights), numLabels_(numLabels), weightIDs_(weightIDs),feat_(feat)
-{
-  OPENGM_ASSERT( weightIDs_.size()==feat_.size() );
-}
-
-template <class T, class I, class L>
-inline
-LPotts<T, I, L>::LPotts
-( 
-   const L numLabels,
-   const std::vector<size_t>& weightIDs,
-   const std::vector<T>& feat
-   )
-   : numLabels_(numLabels), weightIDs_(weightIDs),feat_(feat)
-{
-  OPENGM_ASSERT( weightIDs_.size()==feat_.size() );
-}
-
-template <class T, class I, class L>
-inline
-LPotts<T, I, L>::LPotts
-( )
-   : numLabels_(0), weightIDs_(std::vector<size_t>(0)), feat_(std::vector<T>(0))
-{
-  OPENGM_ASSERT( weightIDs_.size()==feat_.size() );
-}
-
-
-template <class T, class I, class L>
-template <class ITERATOR>
-inline T
-LPotts<T, I, L>::weightGradient 
-(
-   size_t weightNumber,
-   ITERATOR begin
-) const {
-  OPENGM_ASSERT(weightNumber< numberOfWeights());
-  if( *(begin) != *(begin+1) )
-    return (*this).feat_[weightNumber];
-  return 0;
-}
-
-template <class T, class I, class L>
-template <class ITERATOR>
-inline T
-LPotts<T, I, L>::operator()
-(
-   ITERATOR begin
-) const {
-   T val = 0;
-   for(size_t i=0;i<numberOfWeights();++i){
-      val += weights_->getWeight(weightIDs_[i]) * weightGradient(i,begin);
-   }
-   return val;
-}
-
-
-template <class T, class I, class L>
-inline L
-LPotts<T, I, L>::shape
-(
-   const size_t i
-) const {
-   return numLabels_;
-}
-
-template <class T, class I, class L>
-inline size_t
-LPotts<T, I, L>::dimension() const {
-   return 2;
-}
-
-template <class T, class I, class L>
-inline size_t
-LPotts<T, I, L>::size() const {
-   return numLabels_*numLabels_;
-}
-
-} // namespace learnable
-} // namespace functions
-
-
-/// FunctionSerialization
-template<class T, class I, class L>
-class FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> > {
-public:
-   typedef typename opengm::functions::learnable::LPotts<T, I, L>::ValueType ValueType;
-
-   static size_t indexSequenceSize(const opengm::functions::learnable::LPotts<T, I, L>&);
-   static size_t valueSequenceSize(const opengm::functions::learnable::LPotts<T, I, L>&);
-   template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR>
-      static void serialize(const opengm::functions::learnable::LPotts<T, I, L>&, INDEX_OUTPUT_ITERATOR, VALUE_OUTPUT_ITERATOR);
-   template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR>
-      static void deserialize( INDEX_INPUT_ITERATOR, VALUE_INPUT_ITERATOR, opengm::functions::learnable::LPotts<T, I, L>&);
-};
-
-template<class T, class I, class L>
-struct FunctionRegistration<opengm::functions::learnable::LPotts<T, I, L> > {
-   enum ID {
-      Id = opengm::FUNCTION_TYPE_ID_OFFSET + 100 + 65
-   };
-};
-
-template<class T, class I, class L>
-inline size_t
-FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::indexSequenceSize
-(
-   const opengm::functions::learnable::LPotts<T, I, L> & src
-) {
-  return 2+src.weightIDs_.size();
-}
-
-template<class T, class I, class L>
-inline size_t
-FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::valueSequenceSize
-(
-   const opengm::functions::learnable::LPotts<T, I, L> & src
-) {
-  return src.feat_.size();
-}
-
-template<class T, class I, class L>
-template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR >
-inline void
-FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::serialize
-(
-   const opengm::functions::learnable::LPotts<T, I, L> & src,
-   INDEX_OUTPUT_ITERATOR indexOutIterator,
-   VALUE_OUTPUT_ITERATOR valueOutIterator
-) {
-   *indexOutIterator = src.numLabels_;
-   ++indexOutIterator; 
-   *indexOutIterator = src.feat_.size();
-   ++indexOutIterator;
-   for(size_t i=0; i<src.weightIDs_.size();++i){
-     *indexOutIterator = src.weightIndex(i);
-     ++indexOutIterator;
-   } 
-   for(size_t i=0; i<src.feat_.size();++i){
-     *valueOutIterator = src.feat_[i];
-     ++valueOutIterator;
-   }
-}
-
-template<class T, class I, class L>
-template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR >
-inline void
-FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::deserialize
-(
-   INDEX_INPUT_ITERATOR indexInIterator,
-   VALUE_INPUT_ITERATOR valueInIterator,
-   opengm::functions::learnable::LPotts<T, I, L> & dst
-) { 
-   dst.numLabels_=*indexInIterator;
-   ++ indexInIterator;
-   const size_t numW=*indexInIterator;
-   ++indexInIterator;
-   dst.feat_.resize(numW);
-   dst.weightIDs_.resize(numW);
-   for(size_t i=0; i<numW;++i){
-     dst.feat_[i]=*valueInIterator;
-     dst.weightIDs_[i]=*indexInIterator;
-     ++indexInIterator;
-     ++valueInIterator;
-   }
-}
-
-} // namespace opengm
-
-#endif // #ifndef OPENGM_LEARNABLE_POTTS_FUNCTION_HXX
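
For reference, the LPotts class deleted above implemented a learnable Potts potential: for two variables with labels u and v it returned (sum_i w_i * feat_i) * I(u != v), looking each w_i up in the shared opengm::learning::Weights vector through the stored weight ids. Below is a minimal standalone sketch of that evaluation, assuming plain std::vector storage and made-up numbers; it is illustrative only, not the OpenGM API:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        // shared weight vector (stand-in for opengm::learning::Weights<T>)
        std::vector<double> weights = {0.5, 2.0};
        // weight ids and features stored by this factor, as in LPotts
        std::vector<std::size_t> weightIds = {0, 1};
        std::vector<double> feat = {1.0, 0.25};

        const std::size_t u = 0, v = 1;      // labels of the two variables
        double value = 0.0;
        if (u != v) {                        // Potts indicator I(u != v)
            for (std::size_t i = 0; i < weightIds.size(); ++i)
                value += weights[weightIds[i]] * feat[i];
        }
        std::cout << "f(u,v) = " << value << "\n";  // 0.5*1.0 + 2.0*0.25 = 1.0
        return 0;
    }

Correspondingly, weightGradient(i, x) in the removed header returned feat_i when the two labels differed and 0 otherwise, which is the per-weight derivative the learning code relied on.
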
diff --git a/include/opengm/functions/learnable/lunary.hxx b/include/opengm/functions/learnable/lunary.hxx
deleted file mode 100644
index cc1b930..0000000
--- a/include/opengm/functions/learnable/lunary.hxx
+++ /dev/null
@@ -1,478 +0,0 @@
-#pragma once
-#ifndef OPENGM_LEARNABLE_UNARY_FUNCTION_HXX
-#define OPENGM_LEARNABLE_UNARY_FUNCTION_HXX
-
-#include <algorithm>
-#include <vector>
-#include <cmath>
-
-#include "opengm/opengm.hxx"
-#include "opengm/functions/function_registration.hxx"
-#include "opengm/functions/function_properties_base.hxx"
-#include "opengm/graphicalmodel/weights.hxx"
-
-namespace opengm {
-namespace functions {
-namespace learnable {
-
-
-
-
-
-template<class V, class I>
-struct FeaturesAndIndices{
-    std::vector<V> features;
-    std::vector<I> weightIds;
-};
-
-
-
-
-template<class T, class I , class L>
-class LUnary
-   : public opengm::FunctionBase<opengm::functions::learnable::LUnary<T, I, L>, T, I, L>
-{
-public:
-    typedef T ValueType;
-    typedef T V;
-    typedef L LabelType;
-    typedef I IndexType;
-
-    LUnary()
-    :  
-    weights_(NULL),
-    numberOfLabels_(0), 
-    offsets_(),
-    weightIds_(),
-    features_()
-    {
-
-    }
-
-    LUnary(
-        const opengm::learning::Weights<T>&     weights,
-        std::vector<FeaturesAndIndices<T, I> >  featuresAndIndicesPerLabel
-    );
-
-    LUnary(
-        const opengm::learning::Weights<T>& weights,    
-        const LabelType                     numberOfLabels,
-        marray::Marray< size_t >            weightIds,
-        marray::Marray< ValueType>          features,
-        const bool                          makeFirstEntryConst
-    );
-
-
-    L shape(const size_t) const;
-    size_t size() const;
-    size_t dimension() const;
-    template<class ITERATOR> T operator()(ITERATOR) const;
-
-    // parameters
-    void setWeights(const opengm::learning::Weights<T>& weights) const{
-        weights_ = &weights;
-    }
-
-    size_t numberOfWeights()const{
-        return weightIds_.size();
-    }
-
-    I weightIndex(const size_t weightNumber) const{
-        return weightIds_[weightNumber];
-    } 
-
-    template<class ITERATOR> 
-    T weightGradient(size_t,ITERATOR) const;
-
-private:
-
-
-protected:
-
-    size_t numWeightsForL(const LabelType l )const{
-        return offsets_[0*numberOfLabels_ + l];
-    }
-    size_t weightIdOffset(const LabelType l )const{
-        return offsets_[1*numberOfLabels_ + l];
-    }
-    size_t featureOffset(const LabelType l )const{
-        return offsets_[2*numberOfLabels_ + l];
-    }
-
-    mutable const opengm::learning::Weights<T> *    weights_;
-
-    IndexType numberOfLabels_;
-    std::vector<IndexType> offsets_;
-    std::vector<size_t> weightIds_;
-    std::vector<ValueType> features_;
-
-
-    friend class opengm::FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >;
-
-
-};
-
-template <class T, class I, class L>
-LUnary<T, I, L>::LUnary(
-    const opengm::learning::Weights<T>& weights,    
-    const LabelType                     numberOfLabels,
-    marray::Marray< size_t >            weightIds,
-    marray::Marray< ValueType>          features,
-    const bool                          makeFirstEntryConst
-)
-:  
-weights_(&weights),
-numberOfLabels_(numberOfLabels), 
-offsets_(numberOfLabels*3),
-weightIds_(),
-features_()
-{
-    const size_t pFeatDim       = features.dimension();
-    const size_t pWeightIdDim   = weightIds.dimension();
-
-    OPENGM_CHECK_OP(weightIds.dimension(), ==, 2 , "wrong dimension");
-    OPENGM_CHECK_OP(weightIds.shape(0)+int(makeFirstEntryConst), ==, numberOfLabels , "wrong shape");
-
-
-    const size_t nWeights = weightIds.size();
-    weightIds_.resize(nWeights);
-
-    const size_t nFeat  = features.size();
-    features_.resize(nFeat);
-
-
-    OPENGM_CHECK_OP(features.dimension(), == , 1 , "feature dimension must be 1 ");
-    OPENGM_CHECK_OP(features.shape(0), == , weightIds.shape(1) , "number of features must match weightIds.shape(1)");
-
-    // copy features
-    for(size_t fi=0; fi<nFeat; ++fi){
-        features_[fi] = features(fi);
-    }
-
-    size_t nwForL = weightIds.shape(1);
-    size_t wOffset = 0;
-
-    if(makeFirstEntryConst){
-
-        OPENGM_CHECK_OP(numberOfLabels_-1, == , weightIds.shape(0),"internal error");
-
-        offsets_[0*numberOfLabels_ + 0] = 0;
-        offsets_[1*numberOfLabels_ + 0] = 0;
-        offsets_[2*numberOfLabels_ + 0] = 0;
-
-        for(LabelType l=1; l<numberOfLabels_; ++l){
-            offsets_[0*numberOfLabels_ + l] = nwForL;
-            offsets_[1*numberOfLabels_ + l] = wOffset;
-            offsets_[2*numberOfLabels_ + l] = 0;
-            // copy weight ids
-            for(size_t wi=0; wi<nwForL; ++wi){
-                weightIds_[wOffset + wi] = weightIds(l-1,wi);
-            }
-            wOffset += nwForL;
-        }
-    }
-    else{
-        OPENGM_CHECK_OP(numberOfLabels_, == , weightIds.shape(0),"internal error");
-        for(LabelType l=0; l<numberOfLabels_; ++l){
-
-            offsets_[0*numberOfLabels_ + l] = nwForL;
-            offsets_[1*numberOfLabels_ + l] = wOffset;
-            offsets_[2*numberOfLabels_ + l] = 0;
-            // copy weight ids
-            for(size_t wi=0; wi<nwForL; ++wi){
-                weightIds_[wOffset + wi] = weightIds(l,wi);
-            }
-            wOffset += nwForL;
-        }
-    }
-
-}
-
-template <class T, class I, class L>
-inline
-LUnary<T, I, L>::LUnary
-( 
-   const opengm::learning::Weights<T> & weights, 
-   std::vector<FeaturesAndIndices<V, I> >  featuresAndIndicesPerLabel 
-)
-:  
-weights_(&weights),
-numberOfLabels_(featuresAndIndicesPerLabel.size()), 
-offsets_(featuresAndIndicesPerLabel.size()*3),
-weightIds_(),
-features_()
-{
-
-    size_t fOffset = 0;
-    size_t wOffset = 0;
-
-
-    // fetch the offsets
-    for(size_t l=0; l<featuresAndIndicesPerLabel.size(); ++l){
-        const size_t nwForL  = featuresAndIndicesPerLabel[l].weightIds.size();
-        const size_t nfForL  = featuresAndIndicesPerLabel[l].features.size();
-        OPENGM_CHECK_OP(nwForL, == , nfForL, "number of features and weights "
-            "must be the same for a given label within this overload of LUnary<T, I, L>::LUnary");
-
-        offsets_[0*numberOfLabels_ + l] = nwForL;
-        offsets_[1*numberOfLabels_ + l] = wOffset;
-        offsets_[2*numberOfLabels_ + l] = fOffset;
-
-        wOffset += nwForL;
-        fOffset += nfForL;
-    }
-
-    weightIds_.resize(wOffset);
-    features_.resize(fOffset);
-
-    // write weightIDs and features
-    for(size_t l=0; l<featuresAndIndicesPerLabel.size(); ++l){
-        const size_t nwForL = numWeightsForL(l);
-        for(size_t i=0; i<nwForL; ++i){
-            weightIds_[featureOffset(l)+i] = featuresAndIndicesPerLabel[l].weightIds[i];
-            features_[featureOffset(l)+i] = featuresAndIndicesPerLabel[l].features[i];
-        }
-    }
-
-    // check that there are no duplicates
-    RandomAccessSet<size_t> idSet;
-    idSet.reserve(weightIds_.size());
-    idSet.insert(weightIds_.begin(), weightIds_.end());
-
-    OPENGM_CHECK_OP(idSet.size(), == , weightIds_.size(), "weightIds has duplicates");
-}
-
-
-
-template <class T, class I, class L>
-template <class ITERATOR>
-inline T
-LUnary<T, I, L>::weightGradient 
-(
-   size_t weightNumber,
-   ITERATOR begin
-) const {
-    OPENGM_CHECK_OP(weightNumber,<,numberOfWeights(), 
-        "weightNumber must be smaller than number of weights");
-    const LabelType l(*begin);
-    const size_t nwForL = numWeightsForL(l);
-    if(nwForL>0){
-        const size_t wiStart = weightIdOffset(l);
-        const size_t wiEnd   = weightIdOffset(l)+nwForL;
-        if(weightNumber >= wiStart && weightNumber < wiEnd ){
-            const size_t wii = weightNumber - wiStart;
-            return features_[featureOffset(l) + wii];
-        }
-    }
-    return static_cast<ValueType>(0);
-}
-
-template <class T, class I, class L>
-template <class ITERATOR>
-inline T
-LUnary<T, I, L>::operator()
-(
-   ITERATOR begin
-) const {
-
-    //std::cout<<"LUnary::operator()\n";
-    //OPENGM_CHECK_OP( int(weights_==NULL),==,int(false),"foo");
-    T val = 0;
-    const LabelType l(*begin);
-    const size_t nwForL = numWeightsForL(l);
-    //std::cout<<"nw for l "<<nwForL<<"\n";
-    //std::cout<<"wsize "<<weights_->size()<<"\n";
-
-    for(size_t i=0; i<nwForL; ++i){
-        //std::cout<<" i "<<i<<"\n";
-        //OPENGM_CHECK_OP(weightIdOffset(l)+i,<,weightIds_.size(),"foo");
-        //OPENGM_CHECK_OP(featureOffset(l)+i,<,features_.size(),"foo");
-        const size_t wi = weightIds_[weightIdOffset(l)+i];
-        //OPENGM_CHECK_OP(wi,<,weights_->size(),"foo");
-
-        val += weights_->getWeight(wi) * features_[featureOffset(l)+i];
-    }
-    //std::cout<<"LUnary::return operator()\n";
-    return val;
-}
-
-
-template <class T, class I, class L>
-inline L
-LUnary<T, I, L>::shape
-(
-   const size_t i
-) const {
-   return numberOfLabels_;
-}
-
-template <class T, class I, class L>
-inline size_t
-LUnary<T, I, L>::dimension() const {
-   return 1;
-}
-
-template <class T, class I, class L>
-inline size_t
-LUnary<T, I, L>::size() const {
-   return numberOfLabels_;
-}
-
-} // namespace learnable
-} // namespace functions
-
-
-/// FunctionSerialization
-template<class T, class I, class L>
-class FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> > {
-public:
-   typedef typename opengm::functions::learnable::LUnary<T, I, L>::ValueType ValueType;
-
-   static size_t indexSequenceSize(const opengm::functions::learnable::LUnary<T, I, L>&);
-   static size_t valueSequenceSize(const opengm::functions::learnable::LUnary<T, I, L>&);
-   template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR>
-      static void serialize(const opengm::functions::learnable::LUnary<T, I, L>&, INDEX_OUTPUT_ITERATOR, VALUE_OUTPUT_ITERATOR);
-   template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR>
-      static void deserialize( INDEX_INPUT_ITERATOR, VALUE_INPUT_ITERATOR, opengm::functions::learnable::LUnary<T, I, L>&);
-};
-
-template<class T, class I, class L>
-struct FunctionRegistration<opengm::functions::learnable::LUnary<T, I, L> > {
-   enum ID {
-      Id = opengm::FUNCTION_TYPE_ID_OFFSET + 100 + 66
-   };
-};
-
-template<class T, class I, class L>
-inline size_t
-FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::indexSequenceSize
-(
-   const opengm::functions::learnable::LUnary<T, I, L> & src
-) {
-
-    size_t size = 0;
-    size += 1; // numberOfLabels
-    size += 1; // numberOfWeights
-    size += 1; // numberOfFeatures
-
-    size += 3*src.shape(0);         // offsets serialization 
-    size += src.weightIds_.size();  // weight id serialization
-
-    return size;
-}
-
-template<class T, class I, class L>
-inline size_t
-FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::valueSequenceSize
-(
-   const opengm::functions::learnable::LUnary<T, I, L> & src
-) {
-  return src.features_.size(); // feature serialization
-}
-
-template<class T, class I, class L>
-template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR >
-inline void
-FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::serialize
-(
-    const opengm::functions::learnable::LUnary<T, I, L> & src,
-    INDEX_OUTPUT_ITERATOR indexOutIterator,
-    VALUE_OUTPUT_ITERATOR valueOutIterator
-) {
-
-    ///////////////////////////////////////
-    /// INDEX SERIALIZATION
-    ////////////////////////////////////////
-    // number of labels
-    *indexOutIterator = src.shape(0);
-    ++indexOutIterator; 
-
-    // number of weights
-    *indexOutIterator = src.weightIds_.size();
-    ++indexOutIterator; 
-    
-    // number of features
-    *indexOutIterator = src.features_.size();
-    ++indexOutIterator; 
-
-    // offset serialization
-    for(size_t i=0; i<src.offsets_.size(); ++i){
-        *indexOutIterator = src.offsets_[i];
-        ++indexOutIterator;
-    }
-
-    // weight id serialization
-    for(size_t i=0; i<src.weightIds_.size(); ++i){
-        *indexOutIterator = src.weightIds_[i];
-        ++indexOutIterator;
-    }
-
-    ///////////////////////////////////////
-    /// VALUE SERIALIZATION
-    ////////////////////////////////////////
-    // feature serialization
-    for(size_t i=0; i<src.features_.size(); ++i){
-        *valueOutIterator = src.features_[i];
-        ++valueOutIterator;
-    }
-}
-
-template<class T, class I, class L>
-template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR >
-inline void
-FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::deserialize
-(
-   INDEX_INPUT_ITERATOR indexInIterator,
-   VALUE_INPUT_ITERATOR valueInIterator,
-   opengm::functions::learnable::LUnary<T, I, L> & dst
-) { 
-
-
-
-    ///////////////////////////////////////
-    /// INDEX DESERIALIZATION
-    ////////////////////////////////////////
-    // number of labels
-    dst.numberOfLabels_ = *indexInIterator;
-    ++indexInIterator;
-    // resize offset accordingly
-    dst.offsets_.resize(3 * dst.numberOfLabels_);
-
-
-    // number of weights
-    const size_t nW =*indexInIterator;
-    ++indexInIterator;
-    // resize weightIds accordingly
-    dst.weightIds_.resize(nW);
-
-    // number of features
-    const size_t nF = *indexInIterator;
-    ++indexInIterator;
-    // resize features accordingly
-    dst.features_.resize(nF);
-
-    // offset deserialization
-    for(size_t i=0; i<dst.offsets_.size(); ++i){
-        dst.offsets_[i] = *indexInIterator;
-        ++indexInIterator;
-    }
-
-    // weight id deserialization
-    for(size_t i=0; i<dst.weightIds_.size(); ++i){
-        dst.weightIds_[i] = *indexInIterator;
-        ++indexInIterator;
-    }
-
-    ///////////////////////////////////////
-    /// VALUE DESERIALIZATION
-    ////////////////////////////////////////
-    // feature deserialization
-    for(size_t i=0; i<dst.features_.size(); ++i){
-        dst.features_[i] = *valueInIterator;
-        ++valueInIterator;
-    } 
-}
-
-} // namespace opengm
-
-#endif // #ifndef OPENGM_LEARNABLE_UNARY_FUNCTION_HXX
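
The LUnary class deleted above stored, for each label l, a block of weight ids and features (addressed through its offsets_ table) and evaluated f(l) = sum_i w(weightId(l, i)) * feature(l, i). A minimal sketch of that lookup, using a nested-vector layout in place of the flat offset arrays and made-up values (illustrative only, not the OpenGM API):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        // shared weight vector (made-up values)
        std::vector<double> weights = {0.5, 2.0, -1.0};
        // per-label weight ids and features: two labels, two entries each
        std::vector<std::vector<std::size_t> > weightIds = {{0, 1}, {1, 2}};
        std::vector<std::vector<double> > features = {{1.0, 0.5}, {0.25, 4.0}};

        for (std::size_t l = 0; l < weightIds.size(); ++l) {
            double value = 0.0;              // f(l) = sum_i w(id) * feature
            for (std::size_t i = 0; i < weightIds[l].size(); ++i)
                value += weights[weightIds[l][i]] * features[l][i];
            std::cout << "f(" << l << ") = " << value << "\n";
        }
        return 0;
    }
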
diff --git a/include/opengm/functions/learnable/lweightedsum_of_functions.hxx b/include/opengm/functions/learnable/lweightedsum_of_functions.hxx
deleted file mode 100644
index 62314f4..0000000
--- a/include/opengm/functions/learnable/lweightedsum_of_functions.hxx
+++ /dev/null
@@ -1,264 +0,0 @@
-#pragma once
-#ifndef OPENGM_LEARNABLE_LWEIGHTEDSUM_OF_FUNCTIONS_FUNCTION_HXX
-#define OPENGM_LEARNABLE_LWEIGHTEDSUM_OF_FUNCTIONS_FUNCTION_HXX
-
-#include <algorithm>
-#include <vector>
-#include <cmath>
-
-#include "opengm/opengm.hxx"
-#include "opengm/functions/function_registration.hxx"
-#include "opengm/functions/function_properties_base.hxx"
-#include "opengm/datastructures/marray/marray.hxx"
-#include "opengm/graphicalmodel/weights.hxx"
-
-namespace opengm {
-namespace functions {
-namespace learnable {
-
-/// Learnable weighted sum of feature-functions
-///
-/// f(x) = \sum_i w(i) * feat(i)(x)
-///  - w    = parameter vector
-///  - feat = feature-function vector
-///
-///  
-/// \ingroup functions
-template<class T, class I = size_t, class L = size_t>
-class LWeightedSumOfFunctions
-   : public opengm::FunctionBase<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>, T, I, L>
-{
-public:
-   typedef T ValueType;
-   typedef L LabelType;
-   typedef I IndexType;
- 
-   LWeightedSumOfFunctions();
-   LWeightedSumOfFunctions(const std::vector<L>& shape,
-      const opengm::learning::Weights<T>& weights,
-      const std::vector<size_t>& weightIDs,
-      const std::vector<marray::Marray<T> >& feat
-      );
- 
-   L shape(const size_t) const;
-   size_t size() const;
-   size_t dimension() const;
-   template<class ITERATOR> T operator()(ITERATOR) const;
- 
-   // parameters
-   void setWeights(const opengm::learning::Weights<T>& weights) const
-      {weights_ = &weights;}
-   size_t numberOfWeights()const
-     {return weightIDs_.size();}
-   I weightIndex(const size_t weightNumber) const
-     {return weightIDs_[weightNumber];} //dummy
-   template<class ITERATOR> 
-   T weightGradient(size_t,ITERATOR) const;
-
-protected:
-   mutable const opengm::learning::Weights<T>* weights_;
-   std::vector<L>                          shape_;
-   std::vector<size_t>                     weightIDs_;
-   std::vector<marray::Marray<T> >         feat_;
-
-   friend class opengm::FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >;
-};
-
-
-template <class T, class I, class L>
-inline
-LWeightedSumOfFunctions<T, I, L>::LWeightedSumOfFunctions
-( 
-   const std::vector<L>&                           shape,
-   const opengm::learning::Weights<T>&             weights,
-   const std::vector<size_t>&                      weightIDs,
-   const std::vector<marray::Marray<T> >&          feat
-   )
-   :   shape_(shape), weights_(&weights), weightIDs_(weightIDs),feat_(feat)
-{
-   OPENGM_ASSERT( weightIDs_.size() == feat_.size() );
-   for(size_t i=0; i<weightIDs_.size(); ++i){
-      OPENGM_ASSERT( size() == feat_[i].size() );
-      for(size_t j=0; j<dimension(); ++j)
-          OPENGM_ASSERT( shape_[j] == feat_[i].shape(j))
-   }
-}
-
-template <class T, class I, class L>
-inline
-LWeightedSumOfFunctions<T, I, L>::LWeightedSumOfFunctions()
-   : shape_(std::vector<L>(0)), weightIDs_(std::vector<size_t>(0)), feat_(std::vector<marray::Marray<T> >(0))
-{
-   ;
-}
-
-
-template <class T, class I, class L>
-template <class ITERATOR>
-inline T
-LWeightedSumOfFunctions<T, I, L>::weightGradient
-(
-   size_t weightNumber,
-   ITERATOR begin
-) const {
-  OPENGM_ASSERT(weightNumber< numberOfWeights());
-  return feat_[weightNumber](begin);
-}
-
-template <class T, class I, class L>
-template <class ITERATOR>
-inline T
-LWeightedSumOfFunctions<T, I, L>::operator()
-(
-   ITERATOR begin
-) const {
-   T val = 0;
-   for(size_t i=0;i<numberOfWeights();++i){
-      val += weights_->getWeight(weightIDs_[i]) * weightGradient(i,begin);
-   }
-   return val;
-}
-
-
-template <class T, class I, class L>
-inline L
-LWeightedSumOfFunctions<T, I, L>::shape
-(
-   const size_t i
-) const {
-   return shape_[i];
-}
-
-template <class T, class I, class L>
-inline size_t
-LWeightedSumOfFunctions<T, I, L>::dimension() const {
-   return shape_.size();
-}
-
-template <class T, class I, class L>
-inline size_t
-LWeightedSumOfFunctions<T, I, L>::size() const {
-   size_t s = 1;
-   for(size_t i=0; i<dimension(); ++i)
-      s *=shape_[i];
-   return s;
-}
-
-} // namespace learnable
-} // namespace functions
-
-
-/// FunctionSerialization
-template<class T, class I, class L>
-class FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> > {
-public:
-   typedef typename opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>::ValueType ValueType;
-
-   static size_t indexSequenceSize(const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>&);
-   static size_t valueSequenceSize(const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>&);
-   template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR>
-      static void serialize(const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>&, INDEX_OUTPUT_ITERATOR, VALUE_OUTPUT_ITERATOR);
-   template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR>
-      static void deserialize( INDEX_INPUT_ITERATOR, VALUE_INPUT_ITERATOR, opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>&);
-};
-
-template<class T, class I, class L>
-struct FunctionRegistration<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> > {
-   enum ID {
-      Id = opengm::FUNCTION_TYPE_ID_OFFSET + 100 + 67
-   };
-};
-
-template<class T, class I, class L>
-inline size_t
-FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >::indexSequenceSize
-(
-   const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> & src
-) {
-   return 1+src.shape_.size()+1+src.weightIDs_.size();
-}
-
-template<class T, class I, class L>
-inline size_t
-FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >::valueSequenceSize
-(
-   const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> & src
-) {
-   return src.feat_.size()*src.size();
-}
-
-template<class T, class I, class L>
-template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR >
-inline void
-FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >::serialize
-(
-   const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> & src,
-   INDEX_OUTPUT_ITERATOR indexOutIterator,
-   VALUE_OUTPUT_ITERATOR valueOutIterator
-) {
-   // save shape
-   *indexOutIterator = src.shape_.size();
-   ++indexOutIterator; 
-   for(size_t i=0; i<src.shape_.size();++i){
-      *indexOutIterator = src.shape_[i];
-      ++indexOutIterator; 
-   }
-   //save parameter ids
-   *indexOutIterator = src.weightIDs_.size();
-   ++indexOutIterator; 
-   for(size_t i=0; i<src.weightIDs_.size();++i){
-      *indexOutIterator = src.weightIDs_[i];
-      ++indexOutIterator; 
-   }
-
-   OPENGM_ASSERT_OP(src.weightIDs_.size(), ==, src.feat_.size());
-
-   // save features  
-   for(size_t i=0; i<src.weightIDs_.size();++i){
-      for(size_t j=0; j<src.feat_[i].size();++j){
-         *valueOutIterator = src.feat_[i](j);
-         ++valueOutIterator;
-      }
-   }
-}
-
-template<class T, class I, class L>
-template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR >
-inline void
-FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >::deserialize
-(
-   INDEX_INPUT_ITERATOR indexInIterator,
-   VALUE_INPUT_ITERATOR valueInIterator,
-   opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> & dst
-) { 
-   //read shape
-   size_t dim  = *indexInIterator;
-   size_t size = 1;
-   ++indexInIterator;
-   std::vector<L> shape(dim);
-   for(size_t i=0; i<dim;++i){
-      shape[i] = *indexInIterator;
-      size    *= *indexInIterator; 
-      ++indexInIterator;
-   }
-   //read parameter ids
-   size_t numW =*indexInIterator;
-   ++indexInIterator;
-   std::vector<size_t> parameterIDs(numW);
-   for(size_t i=0; i<numW;++i){ 
-      parameterIDs[i] = *indexInIterator;
-      ++indexInIterator;
-   }
-   //read features
-   std::vector<marray::Marray<T> > feat(numW,marray::Marray<T>(shape.begin(),shape.end()));
-   for(size_t i=0; i<numW;++i){   
-      for(size_t j=0; j<size;++j){
-         feat[i](j)=*valueInIterator;
-         ++valueInIterator;
-      }
-   }   
-}
-
-} // namespace opengm
-
-#endif //OPENGM_LEARNABLE_LWEIGHTEDSUM_OF_FUNCTIONS_FUNCTION_HXX
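
The LWeightedSumOfFunctions class deleted above represented a factor as a weighted sum of fixed value tables: f(x) = sum_i w(i) * feat_i(x), where each feat_i is a marray::Marray over the factor's label space. A minimal sketch using flattened std::vector tables in place of marray::Marray, with made-up values (illustrative only, not the OpenGM API):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        // shared weights and the ids used by this factor (made-up values)
        std::vector<double> weights = {0.5, 2.0};
        std::vector<std::size_t> weightIds = {0, 1};
        // one flattened 2x2 value table per weight id (stand-in for marray::Marray<T>)
        std::vector<std::vector<double> > feat = {
            {0.0, 1.0, 1.0, 0.0},   // table for weight id 0
            {1.0, 0.0, 0.0, 1.0}    // table for weight id 1
        };

        const std::size_t x = 1;    // flat index of some labeling (u, v)
        double value = 0.0;
        for (std::size_t i = 0; i < weightIds.size(); ++i)
            value += weights[weightIds[i]] * feat[i][x];
        std::cout << "f(x) = " << value << "\n";   // 0.5*1.0 + 2.0*0.0 = 0.5
        return 0;
    }
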
diff --git a/include/opengm/python/opengmpython.hxx b/include/opengm/python/opengmpython.hxx
index e579e6d..4f771f2 100644
--- a/include/opengm/python/opengmpython.hxx
+++ b/include/opengm/python/opengmpython.hxx
@@ -18,8 +18,6 @@
 #include "opengm/functions/truncated_squared_difference.hxx"
 #include "opengm/functions/sparsemarray.hxx"
 
-#include "opengm/functions/learnable/lpotts.hxx"
-#include "opengm/functions/learnable/lunary.hxx"
 
 #include <opengm/python/opengmpython.hxx>
 #include <opengm/python/converter.hxx>
@@ -66,8 +64,8 @@ namespace python{
         typedef opengm::TruncatedAbsoluteDifferenceFunction   <ValueType,IndexType,LabelType> PyTruncatedAbsoluteDifferenceFunction;
         typedef opengm::TruncatedSquaredDifferenceFunction    <ValueType,IndexType,LabelType> PyTruncatedSquaredDifferenceFunction;
         typedef opengm::SparseFunction                        <ValueType,IndexType,LabelType> PySparseFunction; 
-        typedef opengm::functions::learnable::LPotts          <ValueType,IndexType,LabelType> PyLPottsFunction;
-        typedef opengm::functions::learnable::LUnary          <ValueType,IndexType,LabelType> PyLUnaryFunction;
+        //typedef opengm::functions::learnable::LPotts          <ValueType,IndexType,LabelType> PyLPottsFunction;
+        //typedef opengm::functions::learnable::LUnary          <ValueType,IndexType,LabelType> PyLUnaryFunction;
 
 
         typedef typename opengm::meta::TypeListGenerator<
@@ -78,8 +76,8 @@ namespace python{
             PyTruncatedAbsoluteDifferenceFunction,
             PyTruncatedSquaredDifferenceFunction,
             PySparseFunction,
-            PyLPottsFunction,
-            PyLUnaryFunction
+            //PyLPottsFunction,
+            //PyLUnaryFunction
         >::type type;
    };
 
@@ -103,8 +101,8 @@ namespace python{
    typedef opengm::SquaredDifferenceFunction             <GmValueType,GmIndexType,GmLabelType> GmSquaredDifferenceFunction;
    typedef opengm::TruncatedSquaredDifferenceFunction    <GmValueType,GmIndexType,GmLabelType> GmTruncatedSquaredDifferenceFunction;
    typedef opengm::SparseFunction                        <GmValueType,GmIndexType,GmLabelType> GmSparseFunction; 
-   typedef opengm::functions::learnable::LPotts          <GmValueType,GmIndexType,GmLabelType> PyLPottsFunction;
-   typedef opengm::functions::learnable::LUnary          <GmValueType,GmIndexType,GmLabelType> PyLUnaryFunction;
+   //typedef opengm::functions::learnable::LPotts          <GmValueType,GmIndexType,GmLabelType> PyLPottsFunction;
+   //typedef opengm::functions::learnable::LUnary          <GmValueType,GmIndexType,GmLabelType> PyLUnaryFunction;
    
    typedef std::vector<GmIndexType> IndexVectorType;
    typedef std::vector<IndexVectorType> IndexVectorVectorType;
diff --git a/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx b/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
index f5a343a..03ea180 100644
--- a/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
+++ b/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
@@ -26,9 +26,6 @@
 #include "opengm/functions/truncated_squared_difference.hxx"
 #include "opengm/functions/sparsemarray.hxx"
 
-#include "opengm/functions/learnable/lpotts.hxx"
-#include "opengm/functions/learnable/lunary.hxx"
-#include "opengm/functions/learnable/lweightedsum_of_functions.hxx"
 
 
 using namespace boost::python;
@@ -174,113 +171,6 @@ namespace pyfunction{
 
 
 
-   template<class FUNCTION>
-   FUNCTION * lPottsConstructor(
-        opengm::python::PyWeights & pyWeights,
-        const opengm::python::GmLabelType numberOfLabels,
-        opengm::python::NumpyView<opengm::python::GmIndexType,1> weightIds,
-        opengm::python::NumpyView<opengm::python::GmValueType,1> features
-
-    ){
-      FUNCTION * f = NULL;
-      
-      std::vector<size_t>      weightIdVec(weightIds.begin(), weightIds.end());
-      std::vector<opengm::python::GmValueType> featureVec(features.begin(), features.end());
-
-      f = new FUNCTION(pyWeights, numberOfLabels, weightIdVec, featureVec);
-      return f;
-   }
-
-
-    template<class FUNCTION>
-    FUNCTION * lUnaryConstructor(
-        opengm::python::PyWeights & pyWeights,
-        const opengm::python::GmLabelType numberOfLabels,
-        opengm::python::NumpyView<opengm::python::GmIndexType,2> weightIds,
-        opengm::python::NumpyView<opengm::python::GmValueType,2> features
-    ){
-        FUNCTION * f = NULL;
-        typedef opengm::functions::learnable::FeaturesAndIndices<
-            opengm::python::GmValueType,
-            opengm::python::GmIndexType
-        > FI;
-        typedef std::vector<FI> FI_VEC;
-
-        size_t fPerL = weightIds.shape(1);
-
-        OPENGM_CHECK_OP(weightIds.shape(0), <=, numberOfLabels,   "wrong shapes");
-        OPENGM_CHECK_OP(weightIds.shape(0), >=, numberOfLabels-1,   "wrong shapes");
-        OPENGM_CHECK_OP(weightIds.shape(0), ==, features.shape(0),"wrong shapes");
-        OPENGM_CHECK_OP(weightIds.shape(1), ==, features.shape(1),"wrong shapes");
-
-        FI_VEC fiVec(numberOfLabels);
-
-        const size_t weightShape0 =  weightIds.shape(0);
-        for(size_t l=0; l<weightShape0; ++l){
-            fiVec[l].weightIds.resize(fPerL);
-            fiVec[l].features.resize(fPerL);
-            for(size_t i=0; i<fPerL; ++i){
-                fiVec[l].weightIds[i] = weightIds(l, i);
-                fiVec[l].features[i] = features(l, i);
-            }
-        }
-        //std::cout<<"done on python side\n";
-        f = new FUNCTION(pyWeights, fiVec);
-        return f;
-    }
-
-    template<class FUNCTION>
-    FUNCTION * lUnaryConstructorList(
-        opengm::python::PyWeights & pyWeights,
-        const opengm::python::GmLabelType numberOfLabels,
-        boost::python::list weightIds,
-        boost::python::list features
-    ){
-
-        typedef opengm::python::NumpyView<opengm::python::GmIndexType,1> IndexArray;
-        typedef opengm::python::NumpyView<opengm::python::GmValueType,1> ValueArray;
-
-
-        OPENGM_CHECK_OP(boost::python::len(weightIds), == ,numberOfLabels ,"length of weightIds must be numberOfLabels");
-        OPENGM_CHECK_OP(boost::python::len(weightIds), == ,boost::python::len(features) ,"weightIds must be as long as features");
-
-
-        FUNCTION * f = NULL;
-        typedef opengm::functions::learnable::FeaturesAndIndices<
-            opengm::python::GmValueType,
-            opengm::python::GmIndexType
-        > FI;
-        typedef std::vector<FI> FI_VEC;
-
-        FI_VEC fiVec(numberOfLabels);
-
-        for(size_t l=0; l<numberOfLabels; ++l){
-
-            std::cout<<"extr. l "<<l<<"\n";
-            boost::python::extract<boost::python::numeric::array> eW(weightIds[l]);
-            boost::python::extract<boost::python::numeric::array> eF(features[l]);
-
-            IndexArray wId = eW();
-            ValueArray fs = eF();
-
-            std::cout<<"done\n";
-
-            OPENGM_CHECK_OP(wId.shape(0), ==, fs.shape(0), 
-                "for one label the number of features and the number of weights must be the same");
-
-            const size_t fPerL = wId.shape(0);
-            fiVec[l].weightIds.resize(fPerL);
-            fiVec[l].features.resize(fPerL);
-
-            for(size_t i=0; i<fPerL; ++i){
-                fiVec[l].weightIds[i] = wId(i);
-                fiVec[l].features[i] = fs(i);
-            }
-        }
-        f = new FUNCTION(pyWeights, fiVec);
-        return f;
-   }
-
     template<class FUNCTION>
     FUNCTION * weightedSumOfFunctionsConstructor(
         boost::python::object pyShape,
@@ -476,9 +366,9 @@ void export_functiontypes(){
    typedef opengm::SquaredDifferenceFunction                       <ValueType,IndexType,LabelType> PySquaredDifferenceFunction;
    typedef opengm::TruncatedSquaredDifferenceFunction              <ValueType,IndexType,LabelType> PyTruncatedSquaredDifferenceFunction;
    typedef opengm::SparseFunction                                  <ValueType,IndexType,LabelType> PySparseFunction; 
-   typedef opengm::functions::learnable::LPotts                    <ValueType,IndexType,LabelType> PyLPottsFunction;
-   typedef opengm::functions::learnable::LUnary                    <ValueType,IndexType,LabelType> PyLUnaryFunction;
-   typedef opengm::functions::learnable::LWeightedSumOfFunctions   <ValueType,IndexType,LabelType> PyLSumOfWeightedFunction;
+   //typedef opengm::functions::learnable::LPotts                    <ValueType,IndexType,LabelType> PyLPottsFunction;
+   //typedef opengm::functions::learnable::LUnary                    <ValueType,IndexType,LabelType> PyLUnaryFunction;
+   //typedef opengm::functions::learnable::LWeightedSumOfFunctions   <ValueType,IndexType,LabelType> PyLSumOfWeightedFunction;
 
    // vector exporters
    export_function_type_vector<PyExplicitFunction>("ExplicitFunctionVector");
@@ -726,54 +616,6 @@ void export_functiontypes(){
    ;
    
 
-
-   FUNCTION_TYPE_EXPORTER_HELPER(PyLPottsFunction,"LPottsFunction")
-    .def("__init__", make_constructor(&pyfunction::lPottsConstructor<PyLPottsFunction> ,default_call_policies(),
-         (
-            boost::python::arg("weights"),
-            boost::python::arg("numberOfLabels"),
-            boost::python::arg("weightIds"),
-            boost::python::arg("features")
-         )
-      ),
-   "todo"
-   );
-
-    FUNCTION_TYPE_EXPORTER_HELPER(PyLUnaryFunction,"LUnaryFunction")
-    .def("__init__", make_constructor(&pyfunction::lUnaryConstructor<PyLUnaryFunction> ,default_call_policies(),
-         (
-            boost::python::arg("weights"),
-            boost::python::arg("numberOfLabels"),
-            boost::python::arg("weightIds"),
-            boost::python::arg("features")
-         )
-      ),
-   "todo"
-    )
-    .def("__init__", make_constructor(&pyfunction::lUnaryConstructorList<PyLUnaryFunction> ,default_call_policies(),
-         (
-            boost::python::arg("weights"),
-            boost::python::arg("numberOfLabels"),
-            boost::python::arg("weightIds"),
-            boost::python::arg("features")
-         )
-      ),
-   "todo"
-    )
-    ;
-
-    FUNCTION_TYPE_EXPORTER_HELPER(PyLSumOfWeightedFunction,"SumOfExpertsFunction")
-    .def("__init__", make_constructor(&pyfunction::weightedSumOfFunctionsConstructor<PyLSumOfWeightedFunction> ,default_call_policies(),
-         (
-            boost::python::arg("shape"),
-            boost::python::arg("weight"),
-            boost::python::arg("weightIds"),
-            boost::python::arg("features")
-         )
-      ),
-   "todo"
-    )
-    ;
 }
 
 template void export_functiontypes<opengm::python::GmValueType,opengm::python::GmIndexType>();
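
With the two binding files changed as above, the Python module no longer carries the learnable function types: the LPotts and LUnary typedefs are commented out of the generated type list in opengmpython.hxx, and the LPottsFunction, LUnaryFunction and SumOfExpertsFunction exporters are dropped from pyFunctionTypes.cxx, so Python code that constructed these functions through the weights, numberOfLabels, weightIds and features arguments shown in the removed exporters will no longer work against this build.
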

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


