[opengm] 284/386: lweightedsum_of_functions OPENGM_ASSERT corrected

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:11 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit e19025f6d4832ac92a6868d1f1b8a217528be309
Author: Janez Ales <janez.ales at iwr.uni-heidelberg.de>
Date:   Wed Oct 28 21:45:08 2015 +0100

    lweightedsum_of_functions OPENGM_ASSERT corrected
---
 include/opengm/functions/learnable/lweightedsum_of_functions.hxx | 5 +++--
 include/opengm/learning/gradient-accumulator.hxx                 | 6 ------
 include/opengm/learning/struct-max-margin.hxx                    | 6 +++---
 3 files changed, 6 insertions(+), 11 deletions(-)

diff --git a/include/opengm/functions/learnable/lweightedsum_of_functions.hxx b/include/opengm/functions/learnable/lweightedsum_of_functions.hxx
index 5978c52..62314f4 100644
--- a/include/opengm/functions/learnable/lweightedsum_of_functions.hxx
+++ b/include/opengm/functions/learnable/lweightedsum_of_functions.hxx
@@ -77,10 +77,11 @@ LWeightedSumOfFunctions<T, I, L>::LWeightedSumOfFunctions
    :   shape_(shape), weights_(&weights), weightIDs_(weightIDs),feat_(feat)
 {
    OPENGM_ASSERT( weightIDs_.size() == feat_.size() );
-   for(size_t i=0; i<weightIDs_.size(); ++i)
+   for(size_t i=0; i<weightIDs_.size(); ++i){
       OPENGM_ASSERT( size() == feat_[i].size() );
       for(size_t j=0; j<dimension(); ++j)
-          OPENGM_ASSERT( shape(j) == feat_[i].shape(j))
+          OPENGM_ASSERT( shape_[j] == feat_[i].shape(j))
+   }
 }
 
 template <class T, class I, class L>
diff --git a/include/opengm/learning/gradient-accumulator.hxx b/include/opengm/learning/gradient-accumulator.hxx
index 43c9cf8..a13473d 100644
--- a/include/opengm/learning/gradient-accumulator.hxx
+++ b/include/opengm/learning/gradient-accumulator.hxx
@@ -97,8 +97,6 @@ struct FeatureAccumulator{
     }
 
     void resetWeights(){
-        //accFeaturesGt_ = 0.0;
-        //accWeights_ = 0.0;
         for(size_t i=0; i<accWeights_.size(); ++i){
             accWeights_[i] = 0.0;
         }
@@ -127,7 +125,6 @@ struct FeatureAccumulator{
                 const Accessor accessorGt(begin, end, gtLabel_);
                 const Accessor accessorMap(begin, end, mapLabel_);
                 
-
                 if(add_){
                     // for gt label
                     accWeights_[gwi] += weight_*f.weightGradient(wi, AccessorIter(accessorGt, 0));
@@ -172,9 +169,6 @@ struct FeatureAccumulator{
     double weight_;
 };
 
-
-
-
 }} // namespace opengm::learning
 
 #endif // OPENGM_LEARNING_GRADIENT_ACCUMULATOR_H__
diff --git a/include/opengm/learning/struct-max-margin.hxx b/include/opengm/learning/struct-max-margin.hxx
index f3d0b72..04f3ffd 100644
--- a/include/opengm/learning/struct-max-margin.hxx
+++ b/include/opengm/learning/struct-max-margin.hxx
@@ -104,8 +104,7 @@ private:
                     std::cout << i << " ";
 
                     // get E(x,y) and F(x,y)
-					//std::cout << "locking model " << i << " of " << _dataset.getNumberOfModels() <<  std::endl;
-					_dataset.lockModel(i);
+                    _dataset.lockModel(i);
 					const GMType &     gm  = _dataset.getModel(i);
 					const GMWITHLOSS & gml = _dataset.getModelWithLoss(i);
 
@@ -118,8 +117,9 @@ private:
 					// find the minimizer y* of F(y,w)
 					ConfigurationType mostViolated;
                     InferenceType inference(gml, _infParam);
+
                     inference.infer();
-					inference.arg(mostViolated);
+                    inference.arg(mostViolated);
 
 					// the optimal value of (1) is now c - F(y*,w)
                     value += c - gml.evaluate(mostViolated);
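
For readers following the first hunk: without the added braces, only the first OPENGM_ASSERT sits inside the outer loop, so the per-dimension shape check ran once, after the loop, rather than for every feature; the second change compares against the stored shape_[j] instead of shape(j). The snippet below is a minimal, self-contained sketch of the corrected consistency check, not OpenGM code: Feature, checkFeatures, and plain assert are hypothetical stand-ins for the marray-based feature containers and OPENGM_ASSERT used in lweightedsum_of_functions.hxx.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Simplified stand-in for one feature array: a shape plus its total size.
    struct Feature {
        std::vector<std::size_t> shape;          // per-dimension sizes
        std::size_t size() const {               // total number of entries
            std::size_t s = 1;
            for (std::size_t d : shape) s *= d;
            return s;
        }
    };

    // Mirrors the corrected constructor check: every feature must have the
    // same total size and the same per-dimension shape as the function itself.
    void checkFeatures(const std::vector<std::size_t>& shape_,
                       const std::vector<Feature>& feat_,
                       std::size_t numWeights) {
        assert(numWeights == feat_.size());
        std::size_t total = 1;
        for (std::size_t d : shape_) total *= d;
        for (std::size_t i = 0; i < numWeights; ++i) {   // braces keep both checks inside the loop
            assert(total == feat_[i].size());
            for (std::size_t j = 0; j < shape_.size(); ++j)
                assert(shape_[j] == feat_[i].shape[j]);  // compare against the stored shape_
        }
    }

    int main() {
        Feature f{{2, 3}};
        checkFeatures({2, 3}, {f, f}, 2);                // passes: shapes agree
        return 0;
    }

With matching shapes, as in main(), every assertion passes; a feature whose shape differs in any dimension would trip the inner check, which is exactly the condition the corrected assertion now enforces per feature.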

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


