[opengm] 52/386: finished implementation (not tested) of StructMaxMargin

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:35:04 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 0da9f7cda553dbe901673bee70a2709aa9063eba
Author: Jan Funke <funke at ini.ch>
Date:   Mon Dec 15 16:34:25 2014 +0100

    finished implementation (not tested) of StructMaxMargin
---
 include/opengm/functions/learnable/lpotts.hxx    |   2 +-
 include/opengm/learning/bundle-optimizer.hxx     |   8 +-
 include/opengm/learning/gradient-accumulator.hxx |  50 ++++++++++
 include/opengm/learning/struct-max-margin.hxx    |  41 +++++---
 src/unittest/test_learning.cxx                   | 116 +++++++++++------------
 5 files changed, 141 insertions(+), 76 deletions(-)
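
For orientation before the individual diffs: StructMaxMargin learns the model
weights w by minimizing a regularized structured risk, and delegates the
minimization to the bundle optimizer touched below. In the notation the
optimizer already uses (L(w) is the value the oracle reports, λ is
_parameter.lambda), the objective is

    min_w  L(w) + ½λ|w|²

with the oracle supplying both L(w) and its gradient ∂L(w)/∂w at each
iterate. This summary is read off the code in this commit, not from separate
documentation.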

diff --git a/include/opengm/functions/learnable/lpotts.hxx b/include/opengm/functions/learnable/lpotts.hxx
index 0c2c9ba..9a42831 100644
--- a/include/opengm/functions/learnable/lpotts.hxx
+++ b/include/opengm/functions/learnable/lpotts.hxx
@@ -130,7 +130,7 @@ LPotts<T, I, L>::operator()
 ) const {
    T val = 0;
    for(size_t i=0;i<numberOfWeights();++i){
-      val += weights_->getWeight(i) * weightGradient(i,begin);
+      val += weights_->getWeight(weightIDs_[i]) * weightGradient(i,begin);
    }
    return val;
 }
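
The one-line fix above is an indexing correction: weight i is local to the
LPotts function, and weightIDs_[i] maps it to that weight's position in the
shared weight vector. As far as this hunk shows, the evaluated value is

    val(x) = Σ_i weights[weightIDs_[i]] · weightGradient(i, x)

so looking up getWeight(i) instead of getWeight(weightIDs_[i]) would silently
read the wrong global weight whenever local and global indices differ.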
diff --git a/include/opengm/learning/bundle-optimizer.hxx b/include/opengm/learning/bundle-optimizer.hxx
index bc2de7d..bc8e1bd 100644
--- a/include/opengm/learning/bundle-optimizer.hxx
+++ b/include/opengm/learning/bundle-optimizer.hxx
@@ -137,13 +137,13 @@ BundleOptimizer<T>::optimize(Oracle& oracle, Weights& w) {
 		// get current value and gradient
 		oracle(w_tm1, L_w_tm1, a_t);
 
-		//std::cout << "       L(w)              is: " << L_w_tm1 << std::endl;
+		std::cout << "       L(w)              is: " << L_w_tm1 << std::endl;
 		//LOG_ALL(bundlelog)   << "      ∂L(w)/∂            is: " << a_t << std::endl;
 
 		// update smallest observed value of regularized L
 		minValue = std::min(minValue, L_w_tm1 + _parameter.lambda*0.5*dot(w_tm1, w_tm1));
 
-		//std::cout << " min_i L(w_i) + ½λ|w_i|² is: " << minValue << std::endl;
+		std::cout << " min_i L(w_i) + ½λ|w_i|² is: " << minValue << std::endl;
 
 		// compute hyperplane offset
 		T b_t = L_w_tm1 - dot(w_tm1, a_t);
@@ -159,13 +159,13 @@ BundleOptimizer<T>::optimize(Oracle& oracle, Weights& w) {
 		// update w and get minimal value
 		findMinLowerBound(w, minLower);
 
-		//std::cout << " min_w ℒ(w)   + ½λ|w|²   is: " << minLower << std::endl;
+		std::cout << " min_w ℒ(w)   + ½λ|w|²   is: " << minLower << std::endl;
 		//std::cout << " w* of ℒ(w)   + ½λ|w|²   is: "  << w << std::endl;
 
 		// compute gap
 		T eps_t = minValue - minLower;
 
-		//std::cout  << "          ε   is: " << eps_t << std::endl;
+		std::cout  << "          ε   is: " << eps_t << std::endl;
 
 		// converged?
 		if (eps_t <= _parameter.min_gap)
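
To make the re-enabled output above easier to interpret: the bundle method
keeps a cutting-plane lower bound ℒ(w) on L(w), built from the hyperplanes
(a_t, b_t) collected so far, and measures progress by the gap between the
best observed value and the minimum of the bound. In the code's own notation:

    ℒ(w) = max_t ( ⟨a_t, w⟩ + b_t ),   with  ℒ(w) ≤ L(w)

    ε_t  = min_i [ L(w_i) + ½λ|w_i|² ] − min_w [ ℒ(w) + ½λ|w|² ]
         = minValue − minLower

ε_t bounds how far the best iterate can still be from the optimum, which is
why the loop terminates once ε_t ≤ _parameter.min_gap.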
diff --git a/include/opengm/learning/gradient-accumulator.hxx b/include/opengm/learning/gradient-accumulator.hxx
new file mode 100644
index 0000000..fd54314
--- /dev/null
+++ b/include/opengm/learning/gradient-accumulator.hxx
@@ -0,0 +1,50 @@
+#ifndef OPENGM_LEARNING_GRADIENT_ACCUMULATOR_H__
+#define OPENGM_LEARNING_GRADIENT_ACCUMULATOR_H__
+
+namespace opengm {
+namespace learning {
+
+/**
+ * Model function visitor to accumulate the gradient for each model weight, 
+ * given a configuration.
+ */
+template <typename ModelWeights, typename ConfigurationType>
+class GradientAccumulator {
+
+public:
+
+	/**
+	 * @param gradient
+	 *              ModelWeights reference to store the gradients.
+	 * @param configuration
+	 *              Current configuration of the variables in the model.
+	 */
+	GradientAccumulator(ModelWeights& gradient, ConfigurationType& configuration) :
+		_gradient(gradient),
+		_configuration(configuration) {
+
+		for (size_t i = 0; i < gradient.numberOfWeights(); i++)
+			gradient[i] = 0;
+	}
+
+	template <typename FunctionType>
+	void operator()(const FunctionType& function) {
+
+		for (int i = 0; i < function.numberOfWeights(); i++) {
+
+			int index = function.weightIndex(i);
+
+			_gradient[index] += function.weightGradient(i, _configuration.begin());
+		}
+	}
+
+private:
+
+	ModelWeights& _gradient;
+	ConfigurationType& _configuration;
+};
+
+}} // namespace opengm::learning
+
+#endif // OPENGM_LEARNING_GRADIENT_ACCUMULATOR_H__
+
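
GradientAccumulator is a visitor: it is handed to every factor of a graphical
model via callFunctor, which dispatches it to the factor's concrete function
type. A minimal usage sketch (gm, gradient and configuration stand for the
types used elsewhere in this commit; this mirrors the struct-max-margin
change below rather than adding new API):

    // construction zero-initializes `gradient`; each callFunctor call then
    // adds, for every weight the factor's function depends on, that
    // function's gradient w.r.t. the weight at the labels in `configuration`
    GradientAccumulator<Weights, std::vector<LabelType> > accumulator(gradient, configuration);
    for (size_t f = 0; f < gm.numberOfFactors(); f++)
        gm[f].callFunctor(accumulator);

Note that operator() assumes the visited function type exposes
numberOfWeights(), weightIndex(i) and weightGradient(i, begin); function
types without these members (e.g. a plain ExplicitFunction) would need
defaults provided elsewhere for this to compile over mixed models.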
diff --git a/include/opengm/learning/struct-max-margin.hxx b/include/opengm/learning/struct-max-margin.hxx
index 1ee2994..a6d0321 100644
--- a/include/opengm/learning/struct-max-margin.hxx
+++ b/include/opengm/learning/struct-max-margin.hxx
@@ -2,9 +2,8 @@
 #ifndef OPENGM_LEARNING_STRUCT_MAX_MARGIN_HXX
 #define OPENGM_LEARNING_STRUCT_MAX_MARGIN_HXX
 
-// uncomment when dataset is done
-//#include "dataset.hxx"
 #include "bundle-optimizer.hxx"
+#include "gradient-accumulator.hxx"
 
 namespace opengm {
 
@@ -55,8 +54,8 @@ private:
 
 		public:
 
-			Oracle(DatasetType& dataset) {
-			}
+			Oracle(DatasetType& dataset) :
+				_dataset(dataset) {}
 
 			/**
 			 * Evaluate the loss-augmented energy value of the dataset and its 
@@ -64,17 +63,37 @@ private:
 			 */
             void operator()(const Weights& w, double& value, Weights& gradient) {
 
+				typedef std::vector<typename InferenceType::LabelType> ConfigurationType;
+
+				// the gradient is zero-initialized by the GradientAccumulator constructed below
+
 				for (int i = 0; i < _dataset.getNumberOfModels(); i++) {
 
-					InferenceType inference(_dataset.getModel(i));
+					// NOT IMPLEMENTED, YET
+					//_dataset.lockModel(i);
+					//const typename DatasetType::GMWITHLOSS& gm = _dataset.getModelWithLoss(i);
+					const typename DatasetType::GMType& gm = _dataset.getModel(i);
+
+					_dataset.getWeights() = w;
+
+					InferenceType inference(gm);
 
-					// TODO: perform infernce, get gradient from MAP
+					ConfigurationType configuration;
+					inference.infer();
+					inference.arg(configuration);
+
+					GradientAccumulator<Weights, ConfigurationType> ga(gradient, configuration);
+					for (size_t i = 0; i < gm.numberOfFactors(); i++)
+						gm[i].callFunctor(ga);
+
+					// NOT IMPLEMENTED, YET
+					//_dataset.unlockModel(i);
 				}
 			}
 
 		private:
 
-			DatasetType _dataset;
+			DatasetType& _dataset;
 	};
 
 	DatasetType& _dataset;
@@ -91,14 +110,10 @@ template <typename InfereneType>
 void
 StructMaxMargin<DS, LG, O>::learn(typename InfereneType::Parameter& infParams) {
 
-	// create a loss-augmented copy of the dataset
-	DS augmentedDataset = _dataset;
-	LossGeneratorType loss;
-	for (unsigned int i = 0; i < augmentedDataset.getNumberOfModels(); i++)
-		loss.addLoss(augmentedDataset.getModel(i), augmentedDataset.getGT(i).begin());
-
 	Oracle<InfereneType> oracle(_dataset);
 
+	_weights = _dataset.getWeights();
+
 	// minimize structured loss
     OptimizerResult result = _optimizer.optimize(oracle, _weights);
 
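
Reading the new oracle as a whole: for every training instance it writes the
current weights into the dataset, runs MAP inference to obtain a
configuration y*, and accumulates the gradient factor by factor; the evident
intent is, for each weight w_j,

    gradient[j] = Σ_models Σ_factors f  ∂f(y*; w) / ∂w_j

i.e. the (sub)gradient of the energy of the inferred labelings with respect
to the weights. Three gaps are visible in the diff itself, consistent with
the "(not tested)" in the commit message: the loss-augmented model
(getModelWithLoss) and the model locking are still commented out as NOT
IMPLEMENTED; `value` is never assigned inside operator(), so the oracle does
not yet report the energy value the bundle optimizer prints; and the
GradientAccumulator is constructed inside the model loop while its
constructor zeroes the gradient, so only the last model's contribution
survives the loop.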
diff --git a/src/unittest/test_learning.cxx b/src/unittest/test_learning.cxx
index a8b9b78..e45b2ab 100644
--- a/src/unittest/test_learning.cxx
+++ b/src/unittest/test_learning.cxx
@@ -1,62 +1,62 @@
-#include "opengm/unittests/test.hxx"
-#include "opengm/graphicalmodel/graphicalmodel.hxx"
-#include "opengm/operations/adder.hxx"
-#include "opengm/learning/struct-max-margin.hxx"
-#include "opengm/learning/dataset/testdataset.hxx"
-#include "opengm/learning/loss/hammingloss.hxx"
-#include "opengm/inference/bruteforce.hxx"
-
-template<class T>
-struct LearningTest {
-
-	typedef T                                                                  ValueType;
-	typedef OPENGM_TYPELIST_2(
-			opengm::ExplicitFunction<T>,
-			opengm::functions::learnable::LPotts<T>)                           FunctionTypeList;
-	typedef opengm::GraphicalModel<ValueType, opengm::Adder, FunctionTypeList> GraphicalModelType;
-	typedef opengm::datasets::TestDataset<GraphicalModelType>                  DatasetType;
-    typedef typename DatasetType::Weights                              Weights;
-	typedef opengm::learning::HammingLoss                                      LossGeneratorType;
-	typedef opengm::Bruteforce<GraphicalModelType,opengm::Minimizer>           InferenceType;
-
-	void testStructMaxMargin() {
-
-		// create a dataset
-		DatasetType dataset;
-
-		// create a learning algorithm
-		opengm::learning::StructMaxMargin<DatasetType, LossGeneratorType> structMaxMargin(dataset);
-
-		// train
-		typename InferenceType::Parameter infParams;
-		structMaxMargin.template learn<InferenceType>(infParams);
-
-		// get the result
-        const Weights& learntParameters = structMaxMargin.getWeights();
-                std::cout << learntParameters.numberOfWeights()<<std::endl;
-                for(size_t i=0; i<learntParameters.numberOfWeights();++i)
-                   std::cout << learntParameters[i] << " ";
-                std::cout << std::endl;
-	}
-
-	void run() {
-
-		this->testStructMaxMargin();
+#include <vector>
+
+#include <opengm/functions/explicit_function.hxx>
+#include <opengm/unittests/test.hxx>
+#include <opengm/graphicalmodel/graphicalmodel.hxx>
+#include <opengm/operations/adder.hxx>
+#include <opengm/operations/minimizer.hxx>
+#include <opengm/inference/icm.hxx>
+#include <opengm/utilities/metaprogramming.hxx>
+
+#include <opengm/functions/learnable/lpotts.hxx>
+#include <opengm/functions/learnable/sum_of_experts.hxx>
+#include <opengm/learning/struct-max-margin.hxx>
+#include <opengm/learning/loss/hammingloss.hxx>
+#include <opengm/learning/dataset/testdataset.hxx>
+#include <opengm/learning/dataset/testdataset2.hxx>
+
+
+//*************************************
+typedef double ValueType;
+typedef size_t IndexType;
+typedef size_t LabelType; 
+typedef opengm::meta::TypeListGenerator<opengm::ExplicitFunction<ValueType,IndexType,LabelType>, opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>, opengm::functions::learnable::SumOfExperts<ValueType,IndexType,LabelType> >::type FunctionListType;
+typedef opengm::GraphicalModel<ValueType,opengm::Adder, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GM; 
+typedef opengm::datasets::TestDataset<GM>  DS;
+typedef opengm::datasets::TestDataset2<GM> DS2;
+typedef opengm::learning::HammingLoss     LOSS;
+typedef opengm::ICM<GM,opengm::Minimizer> INF;
+
+//*************************************
+
+
+int main() {
+   std::cout << " Includes are fine :-) " << std::endl; 
+  
+   {
+      DS dataset;
+      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
+      
+      
+      opengm::learning::StructMaxMargin<DS,LOSS> learner(dataset);
+      
+      
+      INF::Parameter infPara;
+      learner.learn<INF>(infPara);
+      
    }
-};
-
-int
-main() {
-   std::cout << "Learning test...  " << std::endl;
-   //{
-   //   LearningTest<float >t;
-   //   t.run();
-   //}
+  
    {
-      LearningTest<double >t;
-      t.run();
+      DS2 dataset;
+      std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
+      
+      
+      opengm::learning::StructMaxMargin<DS2,LOSS> learner(dataset);
+      
+      
+      INF::Parameter infPara;
+      learner.learn<INF>(infPara);
    }
-   std::cout << "done.." << std::endl;
-   return 0;
-}
 
+
+}
