[opengm] 286/386: parallelization added to struct-max-margin (triggered by WITH_OPENMP)

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:12 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit b04442e752b14521cfbfe1a9669fe01676009074
Author: Janez Ales <janez.ales at iwr.uni-heidelberg.de>
Date:   Tue Nov 3 18:47:21 2015 +0100

    parallelization added to struct-max-margin (triggered by WITH_OPENMP)
---
 include/opengm/learning/gradient-accumulator.hxx |  2 +-
 include/opengm/learning/struct-max-margin.hxx    | 38 ++++++++++++++++++++----
 2 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/include/opengm/learning/gradient-accumulator.hxx b/include/opengm/learning/gradient-accumulator.hxx
index a13473d..88e920a 100644
--- a/include/opengm/learning/gradient-accumulator.hxx
+++ b/include/opengm/learning/gradient-accumulator.hxx
@@ -124,7 +124,7 @@ struct FeatureAccumulator{
 
                 const Accessor accessorGt(begin, end, gtLabel_);
                 const Accessor accessorMap(begin, end, mapLabel_);
-                
+
                 if(add_){
                     // for gt label
                     accWeights_[gwi] += weight_*f.weightGradient(wi, AccessorIter(accessorGt, 0));
diff --git a/include/opengm/learning/struct-max-margin.hxx b/include/opengm/learning/struct-max-margin.hxx
index 04f3ffd..6ff2685 100644
--- a/include/opengm/learning/struct-max-margin.hxx
+++ b/include/opengm/learning/struct-max-margin.hxx
@@ -5,6 +5,10 @@
 #include "bundle-optimizer.hxx"
 #include "gradient-accumulator.hxx"
 
+#ifdef WITH_OPENMP
+#include <omp.h>
+#endif
+
 namespace opengm {
 
 namespace learning {
@@ -100,12 +104,25 @@ private:
 				// set the weights w in E(x,y) and F(x,y)
 				_dataset.getWeights() = w;
                 std::cout << std::endl << " MODEL : ";
-				for (int i = 0; i < _dataset.getNumberOfModels(); i++) {
-                    std::cout << i << " ";
 
-                    // get E(x,y) and F(x,y)
+                #ifdef WITH_OPENMP
+                omp_lock_t modelLock;
+                omp_init_lock(&modelLock);
+                #pragma omp parallel for
+                #endif
+                for (int i = 0; i < _dataset.getNumberOfModels(); i++) {
+                    std::cout << i;
+
+                    // lock the model
+                    #ifdef WITH_OPENMP
+                    omp_set_lock(&modelLock);
                     _dataset.lockModel(i);
-					const GMType &     gm  = _dataset.getModel(i);
+                    omp_unset_lock(&modelLock);
+                    #else
+                    _dataset.lockModel(i);
+                    #endif
+                    // get E(x,y) and F(x,y)
+                    const GMType &     gm  = _dataset.getModel(i);
 					const GMWITHLOSS & gml = _dataset.getModelWithLoss(i);
 
 					// get the best-effort solution y'
@@ -122,6 +139,7 @@ private:
                     inference.arg(mostViolated);
 
 					// the optimal value of (1) is now c - F(y*,w)
+                    #pragma omp atomic
                     value += c - gml.evaluate(mostViolated);
 
 					// the gradients are
@@ -133,11 +151,19 @@ private:
 						gm[j].callViFunctor(gaBestEffort);
 						gm[j].callViFunctor(gaMostViolated);
 					}
+
+                    // unlock the model
+                    #ifdef WITH_OPENMP
+                    omp_set_lock(&modelLock);
+                    _dataset.unlockModel(i);
+                    omp_unset_lock(&modelLock);
+                    #else
                     _dataset.unlockModel(i);
-				}
-                std::cout << std::endl;
+                    #endif
+                } // end for model
 			}
 
+
 		private:
 
 			DatasetType& _dataset;

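For reference, the pattern applied in the patch above (an OpenMP parallel-for guarded by WITH_OPENMP, an omp_lock_t serializing the non-thread-safe model bookkeeping, and an atomic update of the shared objective value) is shown below as a minimal standalone sketch. The names Dataset and expensiveEvaluation are hypothetical placeholders, not part of the opengm API.

// Minimal sketch of the WITH_OPENMP guard / omp_lock_t / omp atomic pattern.
// Compiles with or without -DWITH_OPENMP (add -fopenmp when it is defined).
#include <iostream>

#ifdef WITH_OPENMP
#include <omp.h>
#endif

struct Dataset {
    // stand-ins for _dataset.lockModel(i) / _dataset.unlockModel(i)
    void lockModel(int) {}
    void unlockModel(int) {}
    int numberOfModels() const { return 8; }
};

// stand-in for the per-model inference / evaluation work
double expensiveEvaluation(int i) { return static_cast<double>(i); }

int main() {
    Dataset dataset;
    double value = 0.0; // shared accumulator, like `value` in the patch

#ifdef WITH_OPENMP
    omp_lock_t modelLock;
    omp_init_lock(&modelLock);
    #pragma omp parallel for
#endif
    for (int i = 0; i < dataset.numberOfModels(); ++i) {
#ifdef WITH_OPENMP
        // serialize access to the non-thread-safe model bookkeeping
        omp_set_lock(&modelLock);
        dataset.lockModel(i);
        omp_unset_lock(&modelLock);
#else
        dataset.lockModel(i);
#endif

        const double contribution = expensiveEvaluation(i);

        // the accumulator is shared between threads, so update it atomically
#ifdef WITH_OPENMP
        #pragma omp atomic
#endif
        value += contribution;

#ifdef WITH_OPENMP
        omp_set_lock(&modelLock);
        dataset.unlockModel(i);
        omp_unset_lock(&modelLock);
#else
        dataset.unlockModel(i);
#endif
    }

#ifdef WITH_OPENMP
    omp_destroy_lock(&modelLock);
#endif

    std::cout << "accumulated value: " << value << std::endl;
    return 0;
}
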
-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git