[opengm] 95/386: bugfix: gradient-accumulator needs to pass local configuration to function
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:35:10 UTC 2016
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch debian/master
in repository opengm.
commit d2e7849beb2f250e8d6b13dd0628df543f70fcf4
Author: Jan Funke <funke at ini.ch>
Date: Wed Dec 17 14:35:27 2014 +0100
bugfix: gradient-accumulator needs to pass local configuration to function
---
include/opengm/learning/gradient-accumulator.hxx | 27 +++++++++++++++++++-----
include/opengm/learning/struct-max-margin.hxx | 4 ++--
2 files changed, 24 insertions(+), 7 deletions(-)
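
The bug, in short: the old operator() handed the full model configuration straight to function.weightGradient(), but a factor's function is defined over only that factor's variables and expects a configuration of matching size and variable order. The fix below gathers a factor-local configuration from the global one before asking for the gradient. A minimal standalone sketch of that gather step (the variable indices and labels are made up for illustration, not taken from the commit):

    #include <cstddef>
    #include <vector>

    int main() {
        // Hypothetical global labeling over five model variables.
        std::vector<std::size_t> configuration = {0, 2, 1, 0, 3};

        // Hypothetical variable indices of one factor, e.g. a pairwise
        // factor over variables 1 and 3.
        std::vector<std::size_t> variableIndices = {1, 3};

        // The gather step the bugfix introduces: build the factor-local
        // configuration from the global one.
        std::vector<std::size_t> localConfiguration;
        for (std::size_t j = 0; j < variableIndices.size(); j++)
            localConfiguration.push_back(configuration[variableIndices[j]]);

        // localConfiguration is now {2, 0}; this, not the global vector,
        // is what the factor's function expects as input.
        return 0;
    }
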
diff --git a/include/opengm/learning/gradient-accumulator.hxx b/include/opengm/learning/gradient-accumulator.hxx
index 151967c..f702aeb 100644
--- a/include/opengm/learning/gradient-accumulator.hxx
+++ b/include/opengm/learning/gradient-accumulator.hxx
@@ -25,9 +25,17 @@ public:
 
     /**
      * @param gradient
-     *        ModelWeights reference to store the gradients.
+     *        ModelWeights reference to store the gradients. Gradient
+     *        values will only be added (or subtracted, if mode ==
+     *        Subtract), so you have to make sure gradient is properly
+     *        initialized to zero.
+     *
      * @param configuration
-     *        Current configuration of the variables in the model.
+     *        Configuration of the variables in the model, to evaluate the
+     *        gradient for.
+     *
+     * @param mode
+     *        Add or Subtract the weight gradients from gradient.
      */
     GradientAccumulator(ModelWeights& gradient, const ConfigurationType& configuration, Mode mode = Add) :
         _gradient(gradient),
@@ -38,14 +46,23 @@ public:
             gradient[i] = 0;
     }
 
-    template <typename FunctionType>
-    void operator()(const FunctionType& function) {
+    template <typename Iterator, typename FunctionType>
+    void operator()(Iterator begin, Iterator end, const FunctionType& function) {
+
+        ConfigurationType localConfiguration;
+        for (Iterator j = begin; j != end; j++)
+            localConfiguration.push_back(_configuration[*j]);
+
+        std::cout << "asking a function for gradient with configuration " << localConfiguration << std::endl;
 
         for (int i = 0; i < function.numberOfWeights(); i++) {
 
             int index = function.weightIndex(i);
-            double g = function.weightGradient(i, _configuration.begin());
+            double g = function.weightGradient(i, localConfiguration.begin());
+
+            std::cout << "gradient for weight " << index << " is " << g << std::endl;
+
             if (_mode == Add)
                 _gradient[index] += g;
             else
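
The Add/Subtract mode documented above is what lets struct-max-margin (second file, below) accumulate the gradient of the margin, gradient(bestEffort) minus gradient(mostViolated), into a single zero-initialized buffer in one pass over the factors. A minimal sketch of that accumulation rule, using a hypothetical standalone accumulate() helper in place of the real class:

    #include <cstddef>
    #include <vector>

    enum Mode { Add, Subtract };

    // Hypothetical helper mirroring the branch in operator() above.
    void accumulate(std::vector<double>& gradient, std::size_t index,
                    double g, Mode mode) {
        if (mode == Add)
            gradient[index] += g;
        else
            gradient[index] -= g;
    }

    int main() {
        std::vector<double> gradient(3, 0.0); // must start at zero

        accumulate(gradient, 0, 1.5, Add);      // from the best-effort pass
        accumulate(gradient, 0, 0.5, Subtract); // from the most-violated pass
        // gradient[0] == 1.0, the margin gradient for weight 0
        return 0;
    }
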
diff --git a/include/opengm/learning/struct-max-margin.hxx b/include/opengm/learning/struct-max-margin.hxx
index 47031e1..405489e 100644
--- a/include/opengm/learning/struct-max-margin.hxx
+++ b/include/opengm/learning/struct-max-margin.hxx
@@ -132,8 +132,8 @@ private:
             GA gaMostViolated(gradient, mostViolated, GA::Subtract);
 
             for (size_t j = 0; j < gm.numberOfFactors(); j++) {
-                gm[j].callFunctor(gaBestEffort);
-                gm[j].callFunctor(gaMostViolated);
+                gm[j].callViFunctor(gaBestEffort);
+                gm[j].callViFunctor(gaMostViolated);
             }
 
             _dataset.unlockModel(i);
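
The switch from callFunctor to callViFunctor matches the new operator() signature: the accumulator now needs the factor's variable indices (as a begin/end iterator pair) in addition to the function, so that it can build the local configuration itself. A sketch of a functor with that shape, assuming (not verified against the OpenGM sources beyond this diff) that callViFunctor passes exactly these three arguments:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct PrintLocalConfiguration {

        std::vector<std::size_t> configuration; // global labeling

        // Same argument shape as the new GradientAccumulator::operator().
        template <typename Iterator, typename FunctionType>
        void operator()(Iterator begin, Iterator end,
                        const FunctionType& function) {

            (void)function; // a real functor would query it, as above

            // Gather the labels of just this factor's variables.
            std::vector<std::size_t> local;
            for (Iterator j = begin; j != end; j++)
                local.push_back(configuration[*j]);

            for (std::size_t l : local)
                std::cout << l << " ";
            std::cout << std::endl;
        }
    };
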
--