[opengm] 375/386: merge changes for functions and graphical model from the learning branch
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:37 UTC 2016
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch debian/master
in repository opengm.
commit 74026daeb3524c8a936c2c817bbfae98b4d7f5fe
Author: Joerg Kappes <kappes at math.uni-heidelberg.de>
Date: Wed Aug 3 22:28:30 2016 +0200
merge changes for functions and graphical model from the learning branch
---
include/opengm/functions/learnable/lpotts.hxx | 254 +++++++++++
include/opengm/functions/learnable/lunary.hxx | 478 +++++++++++++++++++++
.../learnable/lweightedsum_of_functions.hxx | 264 ++++++++++++
include/opengm/functions/unary_loss_function.hxx | 151 +++++++
include/opengm/graphicalmodel/weights.hxx | 282 ++++++++++++
src/unittest/test_learnable_functions.cxx | 79 ++++
6 files changed, 1508 insertions(+)
diff --git a/include/opengm/functions/learnable/lpotts.hxx b/include/opengm/functions/learnable/lpotts.hxx
new file mode 100644
index 0000000..a9938c6
--- /dev/null
+++ b/include/opengm/functions/learnable/lpotts.hxx
@@ -0,0 +1,254 @@
+#pragma once
+#ifndef OPENGM_LEARNABLE_POTTS_FUNCTION_HXX
+#define OPENGM_LEARNABLE_POTTS_FUNCTION_HXX
+
+#include <algorithm>
+#include <vector>
+#include <cmath>
+
+#include "opengm/opengm.hxx"
+#include "opengm/functions/function_registration.hxx"
+#include "opengm/functions/function_properties_base.hxx"
+#include "opengm/graphicalmodel/weights.hxx"
+
+namespace opengm {
+namespace functions {
+namespace learnable {
+
+/// Learnable feature function for two variables
+///
+/// f(u,v) = (\sum_i w_i * feat_i) I(u!=v)
+/// - w = parameter vector
+/// - feat = feature vector
+///
+/// derive from this class and implement the function
+/// parameterGradient(i,x) = A(x)_{i,*} * feat
+///
+/// \ingroup functions
+template<class T, class I = size_t, class L = size_t>
+class LPotts
+ : public opengm::FunctionBase<opengm::functions::learnable::LPotts<T, I, L>, T, I, L>
+{
+public:
+ typedef T ValueType;
+ typedef L LabelType;
+ typedef I IndexType;
+
+ LPotts();
+ LPotts(const opengm::learning::Weights<T>& weights,
+ const L numLabels,
+ const std::vector<size_t>& weightIDs,
+ const std::vector<T>& feat
+ );
+ LPotts(const L numLabels,
+ const std::vector<size_t>& weightIDs,
+ const std::vector<T>& feat
+ );
+ L shape(const size_t) const;
+ size_t size() const;
+ size_t dimension() const;
+ template<class ITERATOR> T operator()(ITERATOR) const;
+
+ // parameters
+ void setWeights(const opengm::learning::Weights<T>& weights) const
+ {weights_ = &weights;}
+ size_t numberOfWeights()const
+ {return weightIDs_.size();}
+ I weightIndex(const size_t weightNumber) const
+ {return weightIDs_[weightNumber];} //dummy
+ template<class ITERATOR>
+ T weightGradient(size_t,ITERATOR) const;
+
+ bool isPotts() const {return true;}
+ bool isGeneralizedPotts() const {return true;}
+
+protected:
+ mutable const opengm::learning::Weights<T> * weights_;
+ L numLabels_;
+ std::vector<size_t> weightIDs_;
+ std::vector<T> feat_;
+
+
+ friend class opengm::FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >;
+};
+
+
+template <class T, class I, class L>
+inline
+LPotts<T, I, L>::LPotts
+(
+ const opengm::learning::Weights<T>& weights,
+ const L numLabels,
+ const std::vector<size_t>& weightIDs,
+ const std::vector<T>& feat
+ )
+ : weights_(&weights), numLabels_(numLabels), weightIDs_(weightIDs),feat_(feat)
+{
+ OPENGM_ASSERT( weightIDs_.size()==feat_.size() );
+}
+
+template <class T, class I, class L>
+inline
+LPotts<T, I, L>::LPotts
+(
+ const L numLabels,
+ const std::vector<size_t>& weightIDs,
+ const std::vector<T>& feat
+ )
+ : numLabels_(numLabels), weightIDs_(weightIDs),feat_(feat)
+{
+ OPENGM_ASSERT( weightIDs_.size()==feat_.size() );
+}
+
+template <class T, class I, class L>
+inline
+LPotts<T, I, L>::LPotts
+( )
+ : weights_(NULL), numLabels_(0), weightIDs_(std::vector<size_t>(0)), feat_(std::vector<T>(0))
+{
+ OPENGM_ASSERT( weightIDs_.size()==feat_.size() );
+}
+
+
+template <class T, class I, class L>
+template <class ITERATOR>
+inline T
+LPotts<T, I, L>::weightGradient
+(
+ size_t weightNumber,
+ ITERATOR begin
+) const {
+ OPENGM_ASSERT(weightNumber< numberOfWeights());
+ if( *(begin) != *(begin+1) )
+ return (*this).feat_[weightNumber];
+ return 0;
+}
+
+template <class T, class I, class L>
+template <class ITERATOR>
+inline T
+LPotts<T, I, L>::operator()
+(
+ ITERATOR begin
+) const {
+ T val = 0;
+ for(size_t i=0;i<numberOfWeights();++i){
+ val += weights_->getWeight(weightIDs_[i]) * weightGradient(i,begin);
+ }
+ return val;
+}
+
+
+template <class T, class I, class L>
+inline L
+LPotts<T, I, L>::shape
+(
+ const size_t i
+) const {
+ return numLabels_;
+}
+
+template <class T, class I, class L>
+inline size_t
+LPotts<T, I, L>::dimension() const {
+ return 2;
+}
+
+template <class T, class I, class L>
+inline size_t
+LPotts<T, I, L>::size() const {
+ return numLabels_*numLabels_;
+}
+
+} // namespace learnable
+} // namespace functions
+
+
+/// FunctionSerialization
+template<class T, class I, class L>
+class FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> > {
+public:
+ typedef typename opengm::functions::learnable::LPotts<T, I, L>::ValueType ValueType;
+
+ static size_t indexSequenceSize(const opengm::functions::learnable::LPotts<T, I, L>&);
+ static size_t valueSequenceSize(const opengm::functions::learnable::LPotts<T, I, L>&);
+ template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR>
+ static void serialize(const opengm::functions::learnable::LPotts<T, I, L>&, INDEX_OUTPUT_ITERATOR, VALUE_OUTPUT_ITERATOR);
+ template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR>
+ static void deserialize( INDEX_INPUT_ITERATOR, VALUE_INPUT_ITERATOR, opengm::functions::learnable::LPotts<T, I, L>&);
+};
+
+template<class T, class I, class L>
+struct FunctionRegistration<opengm::functions::learnable::LPotts<T, I, L> > {
+ enum ID {
+ Id = opengm::FUNCTION_TYPE_ID_OFFSET + 100 + 65
+ };
+};
+
+template<class T, class I, class L>
+inline size_t
+FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::indexSequenceSize
+(
+ const opengm::functions::learnable::LPotts<T, I, L> & src
+) {
+ return 2+src.weightIDs_.size();
+}
+
+template<class T, class I, class L>
+inline size_t
+FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::valueSequenceSize
+(
+ const opengm::functions::learnable::LPotts<T, I, L> & src
+) {
+ return src.feat_.size();
+}
+
+template<class T, class I, class L>
+template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR >
+inline void
+FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::serialize
+(
+ const opengm::functions::learnable::LPotts<T, I, L> & src,
+ INDEX_OUTPUT_ITERATOR indexOutIterator,
+ VALUE_OUTPUT_ITERATOR valueOutIterator
+) {
+ *indexOutIterator = src.numLabels_;
+ ++indexOutIterator;
+ *indexOutIterator = src.feat_.size();
+ ++indexOutIterator;
+ for(size_t i=0; i<src.weightIDs_.size();++i){
+ *indexOutIterator = src.weightIndex(i);
+ ++indexOutIterator;
+ }
+ for(size_t i=0; i<src.feat_.size();++i){
+ *valueOutIterator = src.feat_[i];
+ ++valueOutIterator;
+ }
+}
+
+template<class T, class I, class L>
+template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR >
+inline void
+FunctionSerialization<opengm::functions::learnable::LPotts<T, I, L> >::deserialize
+(
+ INDEX_INPUT_ITERATOR indexInIterator,
+ VALUE_INPUT_ITERATOR valueInIterator,
+ opengm::functions::learnable::LPotts<T, I, L> & dst
+) {
+ dst.numLabels_=*indexInIterator;
+ ++ indexInIterator;
+ const size_t numW=*indexInIterator;
+ ++indexInIterator;
+ dst.feat_.resize(numW);
+ dst.weightIDs_.resize(numW);
+ for(size_t i=0; i<numW;++i){
+ dst.feat_[i]=*valueInIterator;
+ dst.weightIDs_[i]=*indexInIterator;
+ ++indexInIterator;
+ ++valueInIterator;
+ }
+}
+
+} // namespace opengm
+
+#endif // #ifndef OPENGM_LEARNABLE_POTTS_FUNCTION_HXX
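For orientation, a minimal usage sketch of LPotts (illustrative only, not part of the patch; it mirrors the unit test added at the end of this commit):

    #include <vector>
    #include "opengm/functions/learnable/lpotts.hxx"

    opengm::learning::Weights<double> weights(1);            // one shared weight
    weights.setWeight(0, 5.0);
    std::vector<size_t> weightIDs(1, 0);                     // this factor reads weight 0
    std::vector<double> feat(1, 1.0);                        // with feature value 1.0
    // pairwise function over 3 labels: f(u,v) = 5.0 * 1.0 if u != v, else 0
    opengm::functions::learnable::LPotts<double> f(weights, 3, weightIDs, feat);
    size_t labels[] = {0, 1};
    double v = f(labels);                                    // 5.0, since the labels differ
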
diff --git a/include/opengm/functions/learnable/lunary.hxx b/include/opengm/functions/learnable/lunary.hxx
new file mode 100644
index 0000000..cc1b930
--- /dev/null
+++ b/include/opengm/functions/learnable/lunary.hxx
@@ -0,0 +1,478 @@
+#pragma once
+#ifndef OPENGM_LEARNABLE_UNARY_FUNCTION_HXX
+#define OPENGM_LEARNABLE_UNARY_FUNCTION_HXX
+
+#include <algorithm>
+#include <vector>
+#include <cmath>
+
+#include "opengm/opengm.hxx"
+#include "opengm/functions/function_registration.hxx"
+#include "opengm/functions/function_properties_base.hxx"
+#include "opengm/graphicalmodel/weights.hxx"
+
+namespace opengm {
+namespace functions {
+namespace learnable {
+
+
+
+
+
+template<class V, class I>
+struct FeaturesAndIndices{
+ std::vector<V> features;
+ std::vector<I> weightIds;
+};
+
+
+
+
+template<class T, class I , class L>
+class LUnary
+ : public opengm::FunctionBase<opengm::functions::learnable::LUnary<T, I, L>, T, I, L>
+{
+public:
+ typedef T ValueType;
+ typedef T V;
+ typedef L LabelType;
+ typedef I IndexType;
+
+ LUnary()
+ :
+ weights_(NULL),
+ numberOfLabels_(0),
+ offsets_(),
+ weightIds_(),
+ features_()
+ {
+
+ }
+
+ LUnary(
+ const opengm::learning::Weights<T>& weights,
+ std::vector<FeaturesAndIndices<T, I> > featuresAndIndicesPerLabel
+ );
+
+ LUnary(
+ const opengm::learning::Weights<T>& weights,
+ const LabelType numberOfLabels,
+ marray::Marray< size_t > weightIds,
+ marray::Marray< ValueType> features,
+ const bool makeFirstEntryConst
+ );
+
+
+ L shape(const size_t) const;
+ size_t size() const;
+ size_t dimension() const;
+ template<class ITERATOR> T operator()(ITERATOR) const;
+
+ // parameters
+ void setWeights(const opengm::learning::Weights<T>& weights) const{
+ weights_ = &weights;
+ }
+
+ size_t numberOfWeights()const{
+ return weightIds_.size();
+ }
+
+ I weightIndex(const size_t weightNumber) const{
+ return weightIds_[weightNumber];
+ }
+
+ template<class ITERATOR>
+ T weightGradient(size_t,ITERATOR) const;
+
+private:
+
+
+protected:
+
+ size_t numWeightsForL(const LabelType l )const{
+ return offsets_[0*numberOfLabels_ + l];
+ }
+ size_t weightIdOffset(const LabelType l )const{
+ return offsets_[1*numberOfLabels_ + l];
+ }
+ size_t featureOffset(const LabelType l )const{
+ return offsets_[2*numberOfLabels_ + l];
+ }
+
+ mutable const opengm::learning::Weights<T> * weights_;
+
+ IndexType numberOfLabels_;
+ std::vector<IndexType> offsets_;
+ std::vector<size_t> weightIds_;
+ std::vector<ValueType> features_;
+
+
+ friend class opengm::FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >;
+
+
+};
+
+template <class T, class I, class L>
+LUnary<T, I, L>::LUnary(
+ const opengm::learning::Weights<T>& weights,
+ const LabelType numberOfLabels,
+ marray::Marray< size_t > weightIds,
+ marray::Marray< ValueType> features,
+ const bool makeFirstEntryConst
+)
+:
+weights_(&weights),
+numberOfLabels_(numberOfLabels),
+offsets_(numberOfLabels*3),
+weightIds_(),
+features_()
+{
+ const size_t pFeatDim = features.dimension();
+ const size_t pWeightIdDim = weightIds.dimension();
+
+ OPENGM_CHECK_OP(weightIds.dimension(), ==, 2 , "wrong dimension");
+ OPENGM_CHECK_OP(weightIds.shape(0)+int(makeFirstEntryConst), ==, numberOfLabels , "wrong shape");
+
+
+ const size_t nWeights = weightIds.size();
+ weightIds_.resize(nWeights);
+
+ const size_t nFeat = features.size();
+ features_.resize(nFeat);
+
+
+ OPENGM_CHECK_OP(features.dimension(), == , 1 , "feature dimension must be 1 ");
+ OPENGM_CHECK_OP(features.shape(0), == , weightIds.shape(1) , "features shape(0) must match weightIds shape(1)");
+
+ // copy features
+ for(size_t fi=0; fi<nFeat; ++fi){
+ features_[fi] = features(fi);
+ }
+
+ size_t nwForL = weightIds.shape(1);
+ size_t wOffset = 0;
+
+ if(makeFirstEntryConst){
+
+ OPENGM_CHECK_OP(numberOfLabels_-1, == , weightIds.shape(0),"internal error");
+
+ offsets_[0*numberOfLabels_ + 0] = 0;
+ offsets_[1*numberOfLabels_ + 0] = 0;
+ offsets_[2*numberOfLabels_ + 0] = 0;
+
+ for(LabelType l=1; l<numberOfLabels_; ++l){
+ offsets_[0*numberOfLabels_ + l] = nwForL;
+ offsets_[1*numberOfLabels_ + l] = wOffset;
+ offsets_[2*numberOfLabels_ + l] = 0;
+ // copy weight ids
+ for(size_t wi=0; wi<nwForL; ++wi){
+ weightIds_[wOffset + wi] = weightIds(l-1,wi);
+ }
+ wOffset += nwForL;
+ }
+ }
+ else{
+ OPENGM_CHECK_OP(numberOfLabels_, == , weightIds.shape(0),"internal error");
+ for(LabelType l=0; l<numberOfLabels_; ++l){
+
+ offsets_[0*numberOfLabels_ + l] = nwForL;
+ offsets_[1*numberOfLabels_ + l] = wOffset;
+ offsets_[2*numberOfLabels_ + l] = 0;
+ // copy weight ids
+ for(size_t wi=0; wi<nwForL; ++wi){
+ weightIds_[wOffset + wi] = weightIds(l,wi);
+ }
+ wOffset += nwForL;
+ }
+ }
+
+}
+
+template <class T, class I, class L>
+inline
+LUnary<T, I, L>::LUnary
+(
+ const opengm::learning::Weights<T> & weights,
+ std::vector<FeaturesAndIndices<V, I> > featuresAndIndicesPerLabel
+)
+:
+weights_(&weights),
+numberOfLabels_(featuresAndIndicesPerLabel.size()),
+offsets_(featuresAndIndicesPerLabel.size()*3),
+weightIds_(),
+features_()
+{
+
+ size_t fOffset = 0;
+ size_t wOffset = 0;
+
+
+ // fetch the offsets
+ for(size_t l=0; l<featuresAndIndicesPerLabel.size(); ++l){
+ const size_t nwForL = featuresAndIndicesPerLabel[l].weightIds.size();
+ const size_t nfForL = featuresAndIndicesPerLabel[l].features.size();
+ OPENGM_CHECK_OP(nwForL, == , nfForL, "number of features and weights "
+ "must be the same for a given label within this overload of LUnary<T, I, L>::LUnary");
+
+ offsets_[0*numberOfLabels_ + l] = nwForL;
+ offsets_[1*numberOfLabels_ + l] = wOffset;
+ offsets_[2*numberOfLabels_ + l] = fOffset;
+
+ wOffset += nwForL;
+ fOffset += nfForL;
+ }
+
+ weightIds_.resize(wOffset);
+ features_.resize(fOffset);
+
+ // write weightIDs and features
+ for(size_t l=0; l<featuresAndIndicesPerLabel.size(); ++l){
+ const size_t nwForL = numWeightsForL(l);
+ for(size_t i=0; i<nwForL; ++i){
+ weightIds_[weightIdOffset(l)+i] = featuresAndIndicesPerLabel[l].weightIds[i];
+ features_[featureOffset(l)+i] = featuresAndIndicesPerLabel[l].features[i];
+ }
+ }
+
+ // check that there are no duplicates
+ RandomAccessSet<size_t> idSet;
+ idSet.reserve(weightIds_.size());
+ idSet.insert(weightIds_.begin(), weightIds_.end());
+
+ OPENGM_CHECK_OP(idSet.size(), == , weightIds_.size(), "weightIds has duplicates");
+}
+
+
+
+template <class T, class I, class L>
+template <class ITERATOR>
+inline T
+LUnary<T, I, L>::weightGradient
+(
+ size_t weightNumber,
+ ITERATOR begin
+) const {
+ OPENGM_CHECK_OP(weightNumber,<,numberOfWeights(),
+ "weightNumber must be smaller than number of weights");
+ const LabelType l(*begin);
+ const size_t nwForL = numWeightsForL(l);
+ if(nwForL>0){
+ const size_t wiStart = weightIdOffset(l);
+ const size_t wiEnd = weightIdOffset(l)+nwForL;
+ if(weightNumber >= wiStart && weightNumber < wiEnd ){
+ const size_t wii = weightNumber - wiStart;
+ return features_[featureOffset(l) + wii];
+ }
+ }
+ return static_cast<ValueType>(0);
+}
+
+template <class T, class I, class L>
+template <class ITERATOR>
+inline T
+LUnary<T, I, L>::operator()
+(
+ ITERATOR begin
+) const {
+
+ //std::cout<<"LUnary::operator()\n";
+ //OPENGM_CHECK_OP( int(weights_==NULL),==,int(false),"foo");
+ T val = 0;
+ const LabelType l(*begin);
+ const size_t nwForL = numWeightsForL(l);
+ //std::cout<<"nw for l "<<nwForL<<"\n";
+ //std::cout<<"wsize "<<weights_->size()<<"\n";
+
+ for(size_t i=0; i<nwForL; ++i){
+ //std::cout<<" i "<<i<<"\n";
+ //OPENGM_CHECK_OP(weightIdOffset(l)+i,<,weightIds_.size(),"foo");
+ //OPENGM_CHECK_OP(featureOffset(l)+i,<,features_.size(),"foo");
+ const size_t wi = weightIds_[weightIdOffset(l)+i];
+ //OPENGM_CHECK_OP(wi,<,weights_->size(),"foo");
+
+ val += weights_->getWeight(wi) * features_[featureOffset(l)+i];
+ }
+ //std::cout<<"LUnary::return operator()\n";
+ return val;
+}
+
+
+template <class T, class I, class L>
+inline L
+LUnary<T, I, L>::shape
+(
+ const size_t i
+) const {
+ return numberOfLabels_;
+}
+
+template <class T, class I, class L>
+inline size_t
+LUnary<T, I, L>::dimension() const {
+ return 1;
+}
+
+template <class T, class I, class L>
+inline size_t
+LUnary<T, I, L>::size() const {
+ return numberOfLabels_;
+}
+
+} // namespace learnable
+} // namespace functions
+
+
+/// FunctionSerialization
+template<class T, class I, class L>
+class FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> > {
+public:
+ typedef typename opengm::functions::learnable::LUnary<T, I, L>::ValueType ValueType;
+
+ static size_t indexSequenceSize(const opengm::functions::learnable::LUnary<T, I, L>&);
+ static size_t valueSequenceSize(const opengm::functions::learnable::LUnary<T, I, L>&);
+ template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR>
+ static void serialize(const opengm::functions::learnable::LUnary<T, I, L>&, INDEX_OUTPUT_ITERATOR, VALUE_OUTPUT_ITERATOR);
+ template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR>
+ static void deserialize( INDEX_INPUT_ITERATOR, VALUE_INPUT_ITERATOR, opengm::functions::learnable::LUnary<T, I, L>&);
+};
+
+template<class T, class I, class L>
+struct FunctionRegistration<opengm::functions::learnable::LUnary<T, I, L> > {
+ enum ID {
+ Id = opengm::FUNCTION_TYPE_ID_OFFSET + 100 + 66
+ };
+};
+
+template<class T, class I, class L>
+inline size_t
+FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::indexSequenceSize
+(
+ const opengm::functions::learnable::LUnary<T, I, L> & src
+) {
+
+ size_t size = 0;
+ size += 1; // numberOfLabels
+ size += 1; // numberOfWeights
+ size += 1; // numberOfFeatures
+
+ size += 3*src.shape(0); // offsets serialization
+ size += src.weightIds_.size(); // weight id serialization
+
+ return size;
+}
+
+template<class T, class I, class L>
+inline size_t
+FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::valueSequenceSize
+(
+ const opengm::functions::learnable::LUnary<T, I, L> & src
+) {
+ return src.features_.size(); // feature serialization
+}
+
+template<class T, class I, class L>
+template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR >
+inline void
+FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::serialize
+(
+ const opengm::functions::learnable::LUnary<T, I, L> & src,
+ INDEX_OUTPUT_ITERATOR indexOutIterator,
+ VALUE_OUTPUT_ITERATOR valueOutIterator
+) {
+
+ ///////////////////////////////////////
+ /// INDEX SERIALIZATION
+ ////////////////////////////////////////
+ // number of labels
+ *indexOutIterator = src.shape(0);
+ ++indexOutIterator;
+
+ // number of weights
+ *indexOutIterator = src.weightIds_.size();
+ ++indexOutIterator;
+
+ // number of features
+ *indexOutIterator = src.features_.size();
+ ++indexOutIterator;
+
+ // offset serialization
+ for(size_t i=0; i<src.offsets_.size(); ++i){
+ *indexOutIterator = src.offsets_[i];
+ ++indexOutIterator;
+ }
+
+ // weight id serialization
+ for(size_t i=0; i<src.weightIds_.size(); ++i){
+ *indexOutIterator = src.weightIds_[i];
+ ++indexOutIterator;
+ }
+
+ ///////////////////////////////////////
+ /// VALUE SERIALIZATION
+ ////////////////////////////////////////
+ // feature serialization
+ for(size_t i=0; i<src.features_.size(); ++i){
+ *valueOutIterator = src.features_[i];
+ ++valueOutIterator;
+ }
+}
+
+template<class T, class I, class L>
+template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR >
+inline void
+FunctionSerialization<opengm::functions::learnable::LUnary<T, I, L> >::deserialize
+(
+ INDEX_INPUT_ITERATOR indexInIterator,
+ VALUE_INPUT_ITERATOR valueInIterator,
+ opengm::functions::learnable::LUnary<T, I, L> & dst
+) {
+
+
+
+ ///////////////////////////////////////
+ /// INDEX DESERIALIZATION
+ ////////////////////////////////////////
+ // number of labels
+ dst.numberOfLabels_ = *indexInIterator;
+ ++indexInIterator;
+ // resize offset accordingly
+ dst.offsets_.resize(3 * dst.numberOfLabels_);
+
+
+ // number of weights
+ const size_t nW =*indexInIterator;
+ ++indexInIterator;
+ // resize weightIds accordingly
+ dst.weightIds_.resize(nW);
+
+ // number of features
+ const size_t nF = *indexInIterator;
+ ++indexInIterator;
+ // resize features accordingly
+ dst.features_.resize(nF);
+
+ // offset deserialization
+ for(size_t i=0; i<dst.offsets_.size(); ++i){
+ dst.offsets_[i] = *indexInIterator;
+ ++indexInIterator;
+ }
+
+ // weight id deserialization
+ for(size_t i=0; i<dst.weightIds_.size(); ++i){
+ dst.weightIds_[i] = *indexInIterator;
+ ++indexInIterator;
+ }
+
+ ///////////////////////////////////////
+ /// VALUE DESERIALIZATION
+ ////////////////////////////////////////
+ // feature deserialization
+ for(size_t i=0; i<dst.features_.size(); ++i){
+ dst.features_[i] = *valueInIterator;
+ ++valueInIterator;
+ }
+}
+
+} // namespace opengm
+
+#endif // #ifndef OPENGM_LEARNABLE_UNARY_FUNCTION_HXX
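A rough sketch of the Marray-based LUnary constructor, as implied by the shape checks above (weightIds is #labels x #features, features is a single vector of length #features shared by all labels; illustrative only, not part of the patch):

    #include "opengm/datastructures/marray/marray.hxx"
    #include "opengm/functions/learnable/lunary.hxx"

    opengm::learning::Weights<double> weights(6);
    const size_t nLabels = 3, nFeat = 2;
    size_t widShape[] = {nLabels, nFeat};
    marray::Marray<size_t> weightIds(widShape, widShape + 2);
    size_t id = 0;
    for(size_t l = 0; l < nLabels; ++l)
        for(size_t k = 0; k < nFeat; ++k)
            weightIds(l, k) = id++;                          // one weight id per (label, feature)
    size_t featShape[] = {nFeat};
    marray::Marray<double> features(featShape, featShape + 1);
    features(0) = 1.0;
    features(1) = 0.5;
    opengm::functions::learnable::LUnary<double, size_t, size_t> f(weights, nLabels, weightIds, features, false);
    size_t label = 2;
    double v = f(&label);    // sum_k weights[weightIds(2,k)] * features(k)
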
diff --git a/include/opengm/functions/learnable/lweightedsum_of_functions.hxx b/include/opengm/functions/learnable/lweightedsum_of_functions.hxx
new file mode 100644
index 0000000..62314f4
--- /dev/null
+++ b/include/opengm/functions/learnable/lweightedsum_of_functions.hxx
@@ -0,0 +1,264 @@
+#pragma once
+#ifndef OPENGM_LEARNABLE_LWEIGHTEDSUM_OF_FUNCTIONS_FUNCTION_HXX
+#define OPENGM_LEARNABLE_LWEIGHTEDSUM_OF_FUNCTIONS_FUNCTION_HXX
+
+#include <algorithm>
+#include <vector>
+#include <cmath>
+
+#include "opengm/opengm.hxx"
+#include "opengm/functions/function_registration.hxx"
+#include "opengm/functions/function_properties_base.hxx"
+#include "opengm/datastructures/marray/marray.hxx"
+#include "opengm/graphicalmodel/weights.hxx"
+
+namespace opengm {
+namespace functions {
+namespace learnable {
+
+/// Learnable weighted sum of feature-functions
+///
+/// f(x) = \sum_i w(i) * feat(i)(x)
+/// - w = parameter vector
+/// - feat = feature-function vector
+///
+///
+/// \ingroup functions
+template<class T, class I = size_t, class L = size_t>
+class LWeightedSumOfFunctions
+ : public opengm::FunctionBase<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>, T, I, L>
+{
+public:
+ typedef T ValueType;
+ typedef L LabelType;
+ typedef I IndexType;
+
+ LWeightedSumOfFunctions();
+ LWeightedSumOfFunctions(const std::vector<L>& shape,
+ const opengm::learning::Weights<T>& weights,
+ const std::vector<size_t>& weightIDs,
+ const std::vector<marray::Marray<T> >& feat
+ );
+
+ L shape(const size_t) const;
+ size_t size() const;
+ size_t dimension() const;
+ template<class ITERATOR> T operator()(ITERATOR) const;
+
+ // parameters
+ void setWeights(const opengm::learning::Weights<T>& weights) const
+ {weights_ = &weights;}
+ size_t numberOfWeights()const
+ {return weightIDs_.size();}
+ I weightIndex(const size_t weightNumber) const
+ {return weightIDs_[weightNumber];} //dummy
+ template<class ITERATOR>
+ T weightGradient(size_t,ITERATOR) const;
+
+protected:
+ mutable const opengm::learning::Weights<T>* weights_;
+ std::vector<L> shape_;
+ std::vector<size_t> weightIDs_;
+ std::vector<marray::Marray<T> > feat_;
+
+ friend class opengm::FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >;
+};
+
+
+template <class T, class I, class L>
+inline
+LWeightedSumOfFunctions<T, I, L>::LWeightedSumOfFunctions
+(
+ const std::vector<L>& shape,
+ const opengm::learning::Weights<T>& weights,
+ const std::vector<size_t>& weightIDs,
+ const std::vector<marray::Marray<T> >& feat
+ )
+ : weights_(&weights), shape_(shape), weightIDs_(weightIDs), feat_(feat)
+{
+ OPENGM_ASSERT( weightIDs_.size() == feat_.size() );
+ for(size_t i=0; i<weightIDs_.size(); ++i){
+ OPENGM_ASSERT( size() == feat_[i].size() );
+ for(size_t j=0; j<dimension(); ++j)
+ OPENGM_ASSERT( shape_[j] == feat_[i].shape(j))
+ }
+}
+
+template <class T, class I, class L>
+inline
+LWeightedSumOfFunctions<T, I, L>::LWeightedSumOfFunctions()
+ : weights_(NULL), shape_(std::vector<L>(0)), weightIDs_(std::vector<size_t>(0)), feat_(std::vector<marray::Marray<T> >(0))
+{
+ ;
+}
+
+
+template <class T, class I, class L>
+template <class ITERATOR>
+inline T
+LWeightedSumOfFunctions<T, I, L>::weightGradient
+(
+ size_t weightNumber,
+ ITERATOR begin
+) const {
+ OPENGM_ASSERT(weightNumber< numberOfWeights());
+ return feat_[weightNumber](begin);
+}
+
+template <class T, class I, class L>
+template <class ITERATOR>
+inline T
+LWeightedSumOfFunctions<T, I, L>::operator()
+(
+ ITERATOR begin
+) const {
+ T val = 0;
+ for(size_t i=0;i<numberOfWeights();++i){
+ val += weights_->getWeight(weightIDs_[i]) * weightGradient(i,begin);
+ }
+ return val;
+}
+
+
+template <class T, class I, class L>
+inline L
+LWeightedSumOfFunctions<T, I, L>::shape
+(
+ const size_t i
+) const {
+ return shape_[i];
+}
+
+template <class T, class I, class L>
+inline size_t
+LWeightedSumOfFunctions<T, I, L>::dimension() const {
+ return shape_.size();
+}
+
+template <class T, class I, class L>
+inline size_t
+LWeightedSumOfFunctions<T, I, L>::size() const {
+ size_t s = 1;
+ for(size_t i=0; i<dimension(); ++i)
+ s *=shape_[i];
+ return s;
+}
+
+} // namespace learnable
+} // namespace functions
+
+
+/// FunctionSerialization
+template<class T, class I, class L>
+class FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> > {
+public:
+ typedef typename opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>::ValueType ValueType;
+
+ static size_t indexSequenceSize(const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>&);
+ static size_t valueSequenceSize(const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>&);
+ template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR>
+ static void serialize(const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>&, INDEX_OUTPUT_ITERATOR, VALUE_OUTPUT_ITERATOR);
+ template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR>
+ static void deserialize( INDEX_INPUT_ITERATOR, VALUE_INPUT_ITERATOR, opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L>&);
+};
+
+template<class T, class I, class L>
+struct FunctionRegistration<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> > {
+ enum ID {
+ Id = opengm::FUNCTION_TYPE_ID_OFFSET + 100 + 67
+ };
+};
+
+template<class T, class I, class L>
+inline size_t
+FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >::indexSequenceSize
+(
+ const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> & src
+) {
+ return 1+src.shape_.size()+1+src.weightIDs_.size();
+}
+
+template<class T, class I, class L>
+inline size_t
+FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >::valueSequenceSize
+(
+ const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> & src
+) {
+ return src.feat_.size()*src.size();
+}
+
+template<class T, class I, class L>
+template<class INDEX_OUTPUT_ITERATOR, class VALUE_OUTPUT_ITERATOR >
+inline void
+FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >::serialize
+(
+ const opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> & src,
+ INDEX_OUTPUT_ITERATOR indexOutIterator,
+ VALUE_OUTPUT_ITERATOR valueOutIterator
+) {
+ // save shape
+ *indexOutIterator = src.shape_.size();
+ ++indexOutIterator;
+ for(size_t i=0; i<src.shape_.size();++i){
+ *indexOutIterator = src.shape_[i];
+ ++indexOutIterator;
+ }
+ //save parameter ids
+ *indexOutIterator = src.weightIDs_.size();
+ ++indexOutIterator;
+ for(size_t i=0; i<src.weightIDs_.size();++i){
+ *indexOutIterator = src.weightIDs_[i];
+ ++indexOutIterator;
+ }
+
+ OPENGM_ASSERT_OP(src.weightIDs_.size(), ==, src.feat_.size());
+
+ // save features
+ for(size_t i=0; i<src.weightIDs_.size();++i){
+ for(size_t j=0; j<src.feat_[i].size();++j){
+ *valueOutIterator = src.feat_[i](j);
+ ++valueOutIterator;
+ }
+ }
+}
+
+template<class T, class I, class L>
+template<class INDEX_INPUT_ITERATOR, class VALUE_INPUT_ITERATOR >
+inline void
+FunctionSerialization<opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> >::deserialize
+(
+ INDEX_INPUT_ITERATOR indexInIterator,
+ VALUE_INPUT_ITERATOR valueInIterator,
+ opengm::functions::learnable::LWeightedSumOfFunctions<T, I, L> & dst
+) {
+ //read shape
+ size_t dim = *indexInIterator;
+ size_t size = 1;
+ ++indexInIterator;
+ std::vector<L> shape(dim);
+ for(size_t i=0; i<dim;++i){
+ shape[i] = *indexInIterator;
+ size *= *indexInIterator;
+ ++indexInIterator;
+ }
+ //read parameter ids
+ size_t numW =*indexInIterator;
+ ++indexInIterator;
+ std::vector<size_t> parameterIDs(numW);
+ for(size_t i=0; i<numW;++i){
+ parameterIDs[i] = *indexInIterator;
+ ++indexInIterator;
+ }
+ //read features
+ std::vector<marray::Marray<T> > feat(numW,marray::Marray<T>(shape.begin(),shape.end()));
+ for(size_t i=0; i<numW;++i){
+ for(size_t j=0; j<size;++j){
+ feat[i](j)=*valueInIterator;
+ ++valueInIterator;
+ }
+ }
+}
+
+} // namespace opengm
+
+#endif //OPENGM_LEARNABLE_LWEIGHTEDSUM_OF_FUNCTIONS_FUNCTION_HXX
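A short sketch of LWeightedSumOfFunctions with two feature tables, a constant feature and an equal-label indicator (illustrative only, not part of the patch):

    #include <vector>
    #include "opengm/functions/learnable/lweightedsum_of_functions.hxx"

    opengm::learning::Weights<double> weights(2);
    weights.setWeight(0, 1.0);
    weights.setWeight(1, -0.5);
    std::vector<size_t> shape(2, 3);                         // a 3 x 3 pairwise function
    size_t mShape[] = {3, 3};
    marray::Marray<double> feat0(mShape, mShape + 2, 1.0);   // constant feature
    marray::Marray<double> feat1(mShape, mShape + 2, 0.0);   // indicator of equal labels
    for(size_t l = 0; l < 3; ++l)
        feat1(l, l) = 1.0;
    std::vector<marray::Marray<double> > feat;
    feat.push_back(feat0);
    feat.push_back(feat1);
    std::vector<size_t> weightIDs;
    weightIDs.push_back(0);
    weightIDs.push_back(1);
    opengm::functions::learnable::LWeightedSumOfFunctions<double> f(shape, weights, weightIDs, feat);
    size_t labels[] = {1, 1};
    double v = f(labels);                                    // 1.0*1.0 + (-0.5)*1.0 = 0.5
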
diff --git a/include/opengm/functions/unary_loss_function.hxx b/include/opengm/functions/unary_loss_function.hxx
new file mode 100644
index 0000000..21f5078
--- /dev/null
+++ b/include/opengm/functions/unary_loss_function.hxx
@@ -0,0 +1,151 @@
+#pragma once
+#ifndef OPENGM_UNARY_LOSS_FUNCTION
+#define OPENGM_UNARY_LOSS_FUNCTION
+
+#include "opengm/functions/function_properties_base.hxx"
+
+namespace opengm {
+
+
+
+
+
+
+
+
+
+
+
+/// UnaryLossFunction models a per-variable loss term (Hamming, label-weighted, L1 or L2) used during learning
+///
+/// \ingroup functions
+template<class T,class I, class L>
+class UnaryLossFunction
+: public FunctionBase<UnaryLossFunction<T,I,L>, T,I,L>
+{
+public:
+
+ typedef T ValueType;
+ typedef T value_type;
+ typedef I IndexType;
+ typedef L LabelType;
+
+
+ enum LossType{
+ HammingLoss = 0,
+ LabelVectorConf = 1,
+ LabelVectorGt = 2,
+ LabelMatrix = 3,
+ L1Loss = 4,
+ L2Loss = 5
+ };
+
+ struct SharedMultiplers{
+ marray::Marray<ValueType> labelMult_;
+ };
+
+
+
+
+ UnaryLossFunction(
+ const LabelType numberOfLabels,
+ const LabelType gtLabel,
+ const LossType lossType,
+ const ValueType multiplier,
+ const SharedMultiplers & sharedMultiplers,
+ const bool owner
+ );
+ template<class Iterator> ValueType operator()(Iterator begin) const;
+ IndexType shape(const IndexType) const;
+ IndexType dimension() const;
+ IndexType size() const;
+
+private:
+ LabelType numberOfLabels_;
+ LabelType gtLabel_;
+ LossType lossType_;
+ ValueType multiplier_;
+ const SharedMultiplers * sharedMultiplers_;
+ bool owner_;
+};
+
+template<class T,class I, class L>
+inline
+UnaryLossFunction<T,I,L>::UnaryLossFunction(
+ const LabelType numberOfLabels,
+ const LabelType gtLabel,
+ const LossType lossType,
+ const ValueType multiplier,
+ const SharedMultiplers & sharedMultiplers,
+ const bool owner
+)
+: numberOfLabels_(numberOfLabels),
+ gtLabel_(gtLabel),
+ lossType_(lossType),
+ multiplier_(multiplier),
+ sharedMultiplers_(&sharedMultiplers),
+ owner_(owner)
+{
+
+}
+
+template<class T,class I, class L>
+template<class Iterator>
+inline typename UnaryLossFunction<T,I,L>::ValueType
+UnaryLossFunction<T,I,L>::operator()
+(
+ Iterator begin
+) const {
+
+ const LabelType l = *begin;
+ const ValueType isDifferent = (l != gtLabel_ ? 1.0 : 0.0);
+
+ switch(lossType_){
+ case HammingLoss:{
+ return static_cast<ValueType>(-1.0) * multiplier_ * isDifferent;
+ }
+ case LabelVectorConf:{
+ return multiplier_ * isDifferent * sharedMultiplers_->labelMult_(l);
+ }
+ case LabelVectorGt:{
+ return multiplier_ * isDifferent * sharedMultiplers_->labelMult_(gtLabel_);
+ }
+ case LabelMatrix:{
+ return multiplier_ * isDifferent * sharedMultiplers_->labelMult_(l, gtLabel_);
+ }
+ case L1Loss:{
+ return multiplier_ * static_cast<ValueType>(std::abs(int(l)-int(gtLabel_)));
+ }
+ case L2Loss:{
+ return multiplier_ * std::pow(int(l)-int(gtLabel_),2);
+ }
+ default :{
+ throw RuntimeError("wrong loss type");
+ }
+ }
+}
+
+template<class T,class I, class L>
+inline typename UnaryLossFunction<T,I,L>::IndexType
+UnaryLossFunction<T,I,L>::shape
+(
+ const typename UnaryLossFunction<T,I,L>::IndexType index
+) const{
+ return numberOfLabels_;
+}
+
+template<class T,class I, class L>
+inline typename UnaryLossFunction<T,I,L>::IndexType
+UnaryLossFunction<T,I,L>::dimension() const {
+ return 1;
+}
+
+template<class T,class I, class L>
+inline typename UnaryLossFunction<T,I,L>::IndexType
+UnaryLossFunction<T,I,L>::size() const {
+ return numberOfLabels_;
+}
+
+} // namespace opengm
+
+#endif // #ifndef OPENGM_UNARY_LOSS_FUNCTION
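A quick sketch of UnaryLossFunction with the Hamming variant; the SharedMultiplers table is only read by the label-weighted variants (illustrative only, not part of the patch):

    #include "opengm/functions/unary_loss_function.hxx"

    typedef opengm::UnaryLossFunction<double, size_t, size_t> Loss;
    Loss::SharedMultiplers mult;                 // unused by HammingLoss
    Loss loss(3, 1, Loss::HammingLoss, 1.0, mult, false);
    size_t equal = 1, different = 2;
    double a = loss(&equal);                     //  0.0 : label matches the ground-truth label 1
    double b = loss(&different);                 // -1.0 : the Hamming loss enters with a minus sign
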
diff --git a/include/opengm/graphicalmodel/weights.hxx b/include/opengm/graphicalmodel/weights.hxx
new file mode 100644
index 0000000..ee3ed2a
--- /dev/null
+++ b/include/opengm/graphicalmodel/weights.hxx
@@ -0,0 +1,282 @@
+#ifndef OPENGM_LEARNING_WEIGHTS
+#define OPENGM_LEARNING_WEIGHTS
+
+#include <opengm/opengm.hxx>
+
+namespace opengm{
+namespace learning{
+
+ /*
+ template<class T>
+ class Weights {
+ public:
+
+ typedef T ValueType;
+
+ Weights(const size_t numberOfWeights=0)
+ : weights_(numberOfWeights)
+ {
+
+ }
+
+ ValueType getWeight(const size_t pi)const{
+ OPENGM_ASSERT_OP(pi,<,weights_.size());
+ return weights_[pi];
+ }
+
+ void setWeight(const size_t pi,const ValueType value){
+ OPENGM_ASSERT_OP(pi,<,weights_.size());
+ weights_[pi] = value;
+ }
+
+ const ValueType& operator[](const size_t pi)const{
+ return weights_[pi];
+ }
+
+ ValueType& operator[](const size_t pi) {
+ return weights_[pi];
+ }
+
+ size_t numberOfWeights()const{
+ return weights_.size();
+ }
+
+ size_t size()const{
+ return weights_.size();
+ }
+
+ private:
+
+ std::vector<ValueType> weights_;
+ };
+ */
+ template<class T>
+ class Weights : public marray::Vector<T>
+ {
+ public:
+
+ typedef T ValueType;
+
+ Weights(const size_t numberOfWeights=0)
+ : marray::Vector<T>(numberOfWeights)
+ {
+
+ }
+
+ ValueType getWeight(const size_t pi)const{
+ OPENGM_ASSERT_OP(pi,<,this->size());
+ return (*this)[pi];
+ }
+
+ void setWeight(const size_t pi,const ValueType value){
+ OPENGM_ASSERT_OP(pi,<,this->size());
+ (*this)[pi] = value;
+ }
+
+
+ size_t numberOfWeights()const{
+ return this->size();
+ }
+
+ private:
+
+ //std::vector<ValueType> weights_;
+ };
+
+
+ template<class T>
+ class WeightRegularizer{
+ public:
+ enum RegularizationType{
+ NoRegularizer=-1,
+ L1Regularizer=1,
+ L2Regularizer=2
+ };
+
+ WeightRegularizer(const int regularizationNorm, const double lambda=1.0)
+ : regularizationType_(),
+ lambda_(lambda){
+ if(regularizationNorm==-1){
+ regularizationType_ = NoRegularizer;
+ }
+ else if(regularizationNorm==1){
+ regularizationType_ = L1Regularizer;
+ }
+ else if(regularizationNorm==2){
+ regularizationType_ = L2Regularizer;
+ }
+ else{
+ throw opengm::RuntimeError("regularizationNorm must be -1 (NONE), 1 (L1) or 2 (L2)");
+ }
+ }
+ WeightRegularizer(const RegularizationType regularizationType=L2Regularizer, const double lambda=1.0)
+ : regularizationType_(regularizationType),
+ lambda_(lambda){
+
+ }
+
+ double lambda()const{
+ return lambda_;
+ }
+
+ RegularizationType regularizationType()const{
+ return regularizationType_;
+ }
+
+ int regularizerNorm()const{
+ return static_cast<int>(regularizationType_);
+ }
+
+ double evaluate(const Weights<T> & weights){
+ if(regularizationType_== NoRegularizer){
+ return 0.0;
+ }
+ else if(regularizationType_ == L1Regularizer){
+ double val = 0.0;
+ for(size_t wi=0; wi<weights.size(); ++wi){
+ val += std::abs(weights[wi]);
+ }
+ return val*lambda_;
+ }
+ else { //if(regularizationType_ == L2Regularizer){
+ double val = 0.0;
+ for(size_t wi=0; wi<weights.size(); ++wi){
+ val += std::pow(weights[wi], 2);
+ }
+ return val*lambda_;
+ }
+ }
+
+ private:
+ RegularizationType regularizationType_;
+ double lambda_;
+ };
+
+
+ template<class T>
+ class WeightConstraints{
+ public:
+
+ WeightConstraints(const size_t nWeights = 0)
+ : wLowerBounds_(nWeights,-1.0*std::numeric_limits<T>::infinity()),
+ wUpperBounds_(nWeights, 1.0*std::numeric_limits<T>::infinity()),
+ cLowerBounds_(),
+ cUpperBounds_(),
+ cOffset_(0),
+ cStart_(),
+ cSize_(),
+ cIndices_(),
+ cCoeff_(){
+
+ }
+ template<class ITER_LB, class ITER_UB>
+ WeightConstraints(ITER_LB lbBegin, ITER_LB lbEnd, ITER_UB ubBegin)
+ : wLowerBounds_(lbBegin,lbEnd),
+ wUpperBounds_(ubBegin, ubBegin + std::distance(lbBegin, lbEnd)),
+ cLowerBounds_(),
+ cUpperBounds_(),
+ cOffset_(0),
+ cStart_(),
+ cSize_(),
+ cIndices_(),
+ cCoeff_()
+ {
+
+ }
+ // query
+ size_t numberOfConstraints()const{
+ return cStart_.size();
+ }
+
+ T weightLowerBound(const size_t wi)const{
+ return wLowerBounds_[wi];
+ }
+ T weightUpperBound(const size_t wi)const{
+ return wUpperBounds_[wi];
+ }
+
+ const std::vector<T> & weightLowerBounds()const{
+ return wLowerBounds_;
+ }
+ const std::vector<T> & weightUpperBounds()const{
+ return wUpperBounds_;
+ }
+
+
+ size_t constraintSize(const size_t ci)const{
+ return cSize_[ci];
+ }
+ T constraintLowerBound(const size_t ci)const{
+ return cLowerBounds_[ci];
+ }
+ T constraintUpperBound(const size_t ci)const{
+ return cUpperBounds_[ci];
+ }
+
+ const std::vector<size_t> & constraintSizes()const{
+ return cSize_;
+ }
+ const std::vector<T> & constraintLowerBounds()const{
+ return cLowerBounds_;
+ }
+ const std::vector<T> & constraintUpperBounds()const{
+ return cUpperBounds_;
+ }
+
+ // modification
+ template<class ITER_LB>
+ void setLowerBounds(ITER_LB lbBegin, ITER_LB lbEnd){
+ wLowerBounds_.assign(lbBegin, lbEnd);
+ }
+
+ template<class ITER_UB>
+ void setUpperBounds(ITER_UB ubBegin, ITER_UB ubEnd){
+ wUpperBounds_.assign(ubBegin, ubEnd);
+ }
+
+ template<class ITER_INDICES, class ITER_COEFF>
+ void addConstraint(ITER_INDICES indicesBegin, ITER_INDICES indicesEnd, ITER_COEFF coeffBegin, const T lowerBound, const T upperBound){
+ // length of this constraint
+ const size_t cSize = std::distance(indicesBegin, indicesEnd);
+ // store length of constraint
+ cSize_.push_back(cSize);
+
+ // store offset / index in 'cIndices_' and 'cCoeff_'
+ cStart_.push_back(cOffset_);
+
+ // increment the cOffset_ for the next constraint which
+ // could be added by the user
+ cOffset_ +=cSize;
+
+ // copy indices and coefficients
+ for( ;indicesBegin!=indicesEnd; ++indicesBegin,++coeffBegin){
+ cIndices_.push_back(*indicesBegin);
+ cCoeff_.push_back(*coeffBegin);
+ }
+ }
+
+ private:
+ // w upper-lower bound
+ std::vector<T> wLowerBounds_;
+ std::vector<T> wUpperBounds_;
+ // constraints
+ std::vector<T> cLowerBounds_;
+ std::vector<T> cUpperBounds_;
+
+ size_t cOffset_;
+ std::vector<size_t> cStart_;
+ std::vector<size_t> cSize_;
+ std::vector<size_t> cIndices_;
+ std::vector<T> cCoeff_;
+ };
+
+
+} // namespace learning
+} // namespace opengm
+
+
+
+
+
+
+#endif /* OPENGM_LEARNING_WEIGHTS */
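A sketch of the weight containers from weights.hxx working together; the constraint bounds a linear combination of weights (illustrative only, not part of the patch):

    #include "opengm/graphicalmodel/weights.hxx"

    opengm::learning::Weights<double> w(3);
    w.setWeight(0, 0.5);
    w.setWeight(1, -1.0);
    w.setWeight(2, 2.0);

    opengm::learning::WeightRegularizer<double> reg(2, 0.1);  // L2 norm, lambda = 0.1
    double penalty = reg.evaluate(w);                         // 0.1 * (0.25 + 1.0 + 4.0)

    opengm::learning::WeightConstraints<double> wc(3);
    size_t idx[]   = {0, 2};
    double coeff[] = {1.0, 1.0};
    wc.addConstraint(idx, idx + 2, coeff, 0.0, 1.0);          // 0 <= w0 + w2 <= 1
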
diff --git a/src/unittest/test_learnable_functions.cxx b/src/unittest/test_learnable_functions.cxx
new file mode 100644
index 0000000..012f752
--- /dev/null
+++ b/src/unittest/test_learnable_functions.cxx
@@ -0,0 +1,79 @@
+#include <vector>
+
+#include "opengm/functions/learnable/lpotts.hxx"
+#include <opengm/unittests/test.hxx>
+
+template<class T>
+struct LearnableFunctionsTest {
+ typedef size_t LabelType;
+ typedef size_t IndexType;
+ typedef T ValueType;
+
+ void testLPotts(){
+ std::cout << " * LearnablePotts ..." << std::endl;
+
+ std::cout << " - test basics ..." <<std::flush;
+ // parameter
+ const size_t numparam = 1;
+ opengm::learning::Weights<ValueType> param(numparam);
+ param.setWeight(0,5.0);
+
+ LabelType numL = 3;
+ std::vector<size_t> pIds(1,0);
+ std::vector<ValueType> feat(1,1);
+ // function
+ opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> f(param,numL,pIds,feat);
+
+ LabelType l[] ={0,0};
+ for(l[0]=0;l[0]<numL;++l[0]){
+ for(l[1]=0;l[1]<numL;++l[1]){
+ if(l[0]==l[1]){
+ OPENGM_TEST_EQUAL_TOLERANCE(f(l),0, 0.0001);
+ }else{
+ OPENGM_TEST_EQUAL_TOLERANCE(f(l),5.0, 0.0001);
+ }
+ }
+ }
+ std::cout << " OK" << std::endl;
+ std::cout << " - test serializations ..." <<std::flush;
+ {
+ typedef opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType> FUNCTION;
+ const size_t sizeIndices=opengm::FunctionSerialization<FUNCTION>::indexSequenceSize(f);
+ const size_t sizeValues=opengm::FunctionSerialization<FUNCTION>::valueSequenceSize(f);
+ std::vector<long long unsigned> indices(sizeIndices);
+ std::vector<T> values(sizeValues);
+
+ opengm::FunctionSerialization<FUNCTION>::serialize(f,indices.begin(),values.begin());
+ FUNCTION f2;
+ opengm::FunctionSerialization<FUNCTION>::deserialize(indices.begin(),values.begin(),f2);
+ f2.setWeights(param);
+
+ OPENGM_TEST(f.dimension()==f2.dimension());
+ OPENGM_TEST(f.size() == f2.size());
+ std::vector<size_t> shape(f.dimension());
+ for(size_t i=0;i<f.dimension();++i) {
+ shape[i]=f.shape(i);
+ OPENGM_TEST(f.shape(i)==f2.shape(i));
+ }
+ opengm::ShapeWalker<std::vector<size_t>::const_iterator > walker(shape.begin(),f.dimension());
+ for(size_t i=0;i<f.size();++i) {
+ OPENGM_TEST(walker.coordinateTuple().size()==f.dimension());
+ OPENGM_TEST(f(walker.coordinateTuple().begin())==f2(walker.coordinateTuple().begin()) );
+ ++walker;
+ }
+ }
+ std::cout << " OK" << std::endl;
+ }
+
+};
+
+
+int main() {
+ std::cout << "Learnable Functions test... " << std::endl;
+ {
+ LearnableFunctionsTest<double>t;
+ t.testLPotts();
+ }
+ std::cout << "done.." << std::endl;
+ return 0;
+}
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git