[opengm] 223/386: minor changes
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:53 UTC 2016
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch debian/master
in repository opengm.
commit 6e89b492025ebd06e355384900147dc1aa39acdb
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date: Tue Jan 13 08:44:24 2015 +0100
minor changes
---
include/opengm/graphicalmodel/weights.hxx | 190 +++++++++++++++++++--
src/interfaces/python/opengm/learning/learning.cxx | 3 +-
.../python/opengm/learning/pyWeights.cxx | 9 +
3 files changed, 190 insertions(+), 12 deletions(-)
diff --git a/include/opengm/graphicalmodel/weights.hxx b/include/opengm/graphicalmodel/weights.hxx
index 395b82d..9e0269e 100644
--- a/include/opengm/graphicalmodel/weights.hxx
+++ b/include/opengm/graphicalmodel/weights.hxx
@@ -73,27 +73,195 @@ namespace learning{
(*this)[pi] = value;
}
- //const ValueType& operator[](const size_t pi)const{
- // return weights_[pi];
- //}
-
- //ValueType& operator[](const size_t pi) {
- // return weights_[pi];
- //}
size_t numberOfWeights()const{
return this->size();
}
- //size_t size()const{
- // return weights_.size();
- //}
-
private:
//std::vector<ValueType> weights_;
};
+
template<class T>
class WeightLoss{
public:
    /// Kind of regularization applied to the weights.
    enum RegularizationType{
        NoRegularizer=-1,
        L1Regularizer=1,
        L2Regularizer=2
    };

    /// Construct from an integer norm: -1 => none, 1 => L1, anything else => L2.
    ///
    /// Bug fixes vs. the original:
    ///  - regularizationType_ was value-initialized (0, not a valid enumerator)
    ///    and only norm == -1 was handled; norms 1 and 2 were silently ignored.
    ///  - the defaulted first argument made `WeightLoss()` ambiguous with the
    ///    enum constructor below; the default could never be used, so it is
    ///    removed (no working caller can break).
    WeightLoss(const int regularizationNorm, const double lambda=1.0)
    :   regularizationType_(regularizationNorm == -1 ? NoRegularizer :
                            (regularizationNorm == 1 ? L1Regularizer : L2Regularizer)),
        lambda_(lambda){
    }

    /// Construct from the enum directly (default: L2 regularizer, lambda = 1).
    WeightLoss(const RegularizationType regularizationType=L2Regularizer, const double lambda=1.0)
    :   regularizationType_(regularizationType),
        lambda_(lambda){
    }

    /// Regularization strength.
    double lambda()const{
        return lambda_;
    }

    RegularizationType regularizationType()const{
        return regularizationType_;
    }

    /// The enum value as a plain integer (-1, 1 or 2).
    int regularizerNorm()const{
        return static_cast<int>(regularizationType_);
    }

    /// Evaluate the regularization term for the given weights.
    /// Templated on the container (anything with size() and operator[]),
    /// so Weights<T> callers are unchanged and plain vectors also work.
    /// NOTE(review): lambda_ is intentionally NOT multiplied in, matching the
    /// original behavior -- confirm callers apply lambda() themselves.
    template<class WEIGHTS>
    double evaluate(const WEIGHTS & weights)const{
        if(regularizationType_ == NoRegularizer){
            return 0.0;
        }
        else if(regularizationType_ == L1Regularizer){
            double val = 0.0;
            for(size_t wi=0; wi<weights.size(); ++wi){
                val += std::abs(weights[wi]);
            }
            return val;
        }
        else { // L2Regularizer
            double val = 0.0;
            for(size_t wi=0; wi<weights.size(); ++wi){
                // w*w instead of std::pow(w, 2): same value, no libm call
                val += weights[wi]*weights[wi];
            }
            return val;
        }
    }

private:
    RegularizationType regularizationType_;
    double lambda_;
};
+
+
template<class T>
class WeightConstraints{
public:

    /// Box constraints for nWeights weights, initially unbounded
    /// ([-inf, +inf] per weight), plus a set of linear constraints
    /// stored in compressed (start/size/indices/coefficients) form.
    WeightConstraints(const size_t nWeights = 0)
    :   wLowerBounds_(nWeights,-1.0*std::numeric_limits<T>::infinity()),
        wUpperBounds_(nWeights, 1.0*std::numeric_limits<T>::infinity()),
        cLowerBounds_(),
        cUpperBounds_(),
        cOffset_(0),
        cStart_(),
        cSize_(),
        cIndices_(),
        cCoeff_(){

    }

    /// Construct from per-weight bound ranges; the upper-bound range must be
    /// at least as long as [lbBegin, lbEnd).
    template<class ITER_LB, class ITER_UB>
    WeightConstraints(ITER_LB lbBegin, ITER_LB lbEnd, ITER_UB ubBegin)
    :   wLowerBounds_(lbBegin,lbEnd),
        wUpperBounds_(ubBegin, ubBegin + std::distance(lbBegin, lbEnd)),
        cLowerBounds_(),
        cUpperBounds_(),
        cOffset_(0),
        cStart_(),
        cSize_(),
        cIndices_(),
        cCoeff_()
    {

    }

    // query
    size_t numberOfConstraints()const{
        return cStart_.size();
    }

    T weightLowerBound(const size_t wi)const{
        return wLowerBounds_[wi];
    }
    T weightUpperBound(const size_t wi)const{
        return wUpperBounds_[wi];
    }

    const std::vector<T> & weightLowerBounds()const{
        return wLowerBounds_;
    }
    const std::vector<T> & weightUpperBounds()const{
        return wUpperBounds_;
    }


    size_t constraintSize(const size_t ci)const{
        return cSize_[ci];
    }
    T constraintLowerBound(const size_t ci)const{
        return cLowerBounds_[ci];
    }
    T constraintUpperBound(const size_t ci)const{
        return cUpperBounds_[ci];
    }

    const std::vector<size_t> & constraintSizes()const{
        // BUGFIX: the original returned cLowerBounds_ (a std::vector<T>),
        // which is both the wrong member and a type mismatch that fails to
        // compile whenever T != size_t.
        return cSize_;
    }
    const std::vector<T> & constraintLowerBounds()const{
        return cLowerBounds_;
    }
    const std::vector<T> & constraintUpperBounds()const{
        return cUpperBounds_;
    }

    // modification
    template<class ITER_LB>
    void setLowerBounds(ITER_LB lbBegin, ITER_LB lbEnd){
        wLowerBounds_.assign(lbBegin, lbEnd);
    }

    template<class ITER_UB>
    void setUpperBounds(ITER_UB ubBegin, ITER_UB ubEnd){
        wUpperBounds_.assign(ubBegin, ubEnd);
    }

    /// Append the linear constraint
    ///   lowerBound <= sum_i coeff[i]*w[indices[i]] <= upperBound.
    template<class ITER_INDICES, class ITER_COEFF>
    void addConstraint(ITER_INDICES indicesBegin, ITER_INDICES indicesEnd, ITER_COEFF coeffBegin, const T lowerBound, const T upperBound){
        // length of this constraint
        const size_t cSize = std::distance(indicesBegin, indicesEnd);
        // store length of constraint
        cSize_.push_back(cSize);

        // store offset / index in 'cIndices_' and 'cCoeff_'
        cStart_.push_back(cOffset_);

        // increment the cOffset_ for the next constraint which
        // could be added by the user
        cOffset_ += cSize;

        // BUGFIX: the original never stored the bounds, so
        // constraintLowerBound()/constraintUpperBound() indexed empty vectors.
        cLowerBounds_.push_back(lowerBound);
        cUpperBounds_.push_back(upperBound);

        // copy indices and coefficients
        for( ;indicesBegin!=indicesEnd; ++indicesBegin,++coeffBegin){
            cIndices_.push_back(*indicesBegin);
            cCoeff_.push_back(*coeffBegin);
        }
    }

private:
    // w upper-lower bound
    std::vector<T> wLowerBounds_;
    std::vector<T> wUpperBounds_;
    // constraints
    std::vector<T> cLowerBounds_;
    std::vector<T> cUpperBounds_;

    size_t cOffset_;            // running total of stored (index, coeff) pairs
    std::vector<size_t> cStart_;   // per-constraint offset into cIndices_/cCoeff_
    std::vector<size_t> cSize_;    // per-constraint length
    std::vector<size_t> cIndices_; // flattened weight indices of all constraints
    std::vector<T> cCoeff_;        // flattened coefficients of all constraints
};
+
+
} // namespace learning
} // namespace opengm
diff --git a/src/interfaces/python/opengm/learning/learning.cxx b/src/interfaces/python/opengm/learning/learning.cxx
index ad80980..4cbc994 100644
--- a/src/interfaces/python/opengm/learning/learning.cxx
+++ b/src/interfaces/python/opengm/learning/learning.cxx
@@ -23,6 +23,7 @@ namespace ol = opengm::learning;
namespace opengm{
void export_weights();
+ void export_weight_constraints();
template<class GM, class LOSS>
void export_dataset(const std::string& className);
@@ -63,7 +64,7 @@ BOOST_PYTHON_MODULE_INIT(_learning) {
opengm::export_weights();
-
+ opengm::export_weight_constraints();
// function exporter
opengm::export_lfunction_generator<op::GmAdder,op::GmMultiplier>();
diff --git a/src/interfaces/python/opengm/learning/pyWeights.cxx b/src/interfaces/python/opengm/learning/pyWeights.cxx
index 9aadaed..44cf9e4 100644
--- a/src/interfaces/python/opengm/learning/pyWeights.cxx
+++ b/src/interfaces/python/opengm/learning/pyWeights.cxx
@@ -31,6 +31,15 @@ namespace opengm{
;
}
+ void export_weight_constraints(){
+ typedef python::GmValueType V;
+ typedef learning::WeightConstraints<V> Weights;
+ boost::python::class_<Weights>("Weights",boost::python::init<const size_t >())
+ //.def("__init__", make_constructor(&pyWeightsConstructor<V> ,boost::python::default_call_policies()))
+ //.def("__getitem__", &Weights::getWeight)
+ //.def("__setitem__", &Weights::setWeight)
+ ;
+ }
}
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git
More information about the debian-science-commits
mailing list