[opengm] 222/386: added isPotts overload to potts function
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:53 UTC 2016
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch debian/master
in repository opengm.
commit 31f4b1a9a699b5efd45a771dd82b5af968ff94fd
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date: Mon Jan 12 15:52:06 2015 +0100
added isPotts overload to potts function
---
fubar/toy_dataset.py | 17 +++++--
include/opengm/functions/learnable/lpotts.hxx | 3 ++
include/opengm/learning/gradient-accumulator.hxx | 17 ++++---
include/opengm/learning/subgradient_ssvm.hxx | 53 ++--------------------
src/interfaces/python/opengm/learning/__init__.py | 5 +-
.../python/opengm/learning/pySubgradientSSVM.cxx | 1 -
6 files changed, 31 insertions(+), 65 deletions(-)
diff --git a/fubar/toy_dataset.py b/fubar/toy_dataset.py
index 10f90d0..48dda9d 100644
--- a/fubar/toy_dataset.py
+++ b/fubar/toy_dataset.py
@@ -8,7 +8,7 @@ import os
from functools import partial
from make_grid_potts_dset import secondOrderImageDataset, getPbar
-
+numpy.random.seed(42)
nImages = 8
shape = [100, 100]
@@ -89,7 +89,7 @@ dataset,test_set = secondOrderImageDataset(imgs=imgs, gts=gts, numberOfLabels=3,
-learner = learning.subgradientSSVM(dataset, learningRate=50, C=100, learningMode='batch',maxIterations=500,averaging=2)
+
learningModi = ['normal','reducedinference','selfFusion','reducedinferenceSelfFusion']
lm = 0
@@ -99,12 +99,21 @@ infCls = opengm.inference.TrwsExternal
param = opengm.InfParam()
+learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
+learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='n')
+
#with opengm.Timer("n 2"):
# learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='n')
#with opengm.Timer("sf"):
# learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='sf')
-with opengm.Timer("ri"):
- learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='ri')
+#with opengm.Timer("ri -30"):
+# learner = learning.subgradientSSVM(dataset, learningRate=0.5, C=100, learningMode='batch',maxIterations=200,averaging=-1,nConf=2)
+# learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='n')
+
+#with opengm.Timer("ri -0"):
+#
+# learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='n')
+
#with opengm.Timer("risf"):
# learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='risf')
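For reference, the learner-driving pattern the script now exercises, collected in one place. This is a sketch only: `dataset` is the object returned by the script's secondOrderImageDataset call (elided here), and the arguments to structMaxMarginLearner are taken verbatim from the hunk above.

    import numpy
    import opengm
    from opengm import learning

    numpy.random.seed(42)   # fixed seed so repeated runs generate the same toy data

    # dataset built as in the script, e.g.
    # dataset, test_set = secondOrderImageDataset(imgs=imgs, gts=gts, numberOfLabels=3, ...)

    learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)

    infCls = opengm.inference.TrwsExternal
    param = opengm.InfParam()
    learner.learn(infCls=infCls, parameter=param,
                  connectedComponents=True, infMode='n')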
diff --git a/include/opengm/functions/learnable/lpotts.hxx b/include/opengm/functions/learnable/lpotts.hxx
index 5ce3263..a9938c6 100644
--- a/include/opengm/functions/learnable/lpotts.hxx
+++ b/include/opengm/functions/learnable/lpotts.hxx
@@ -59,6 +59,9 @@ public:
template<class ITERATOR>
T weightGradient(size_t,ITERATOR) const;
+ bool isPotts() const {return true;}
+ bool isGeneralizedPotts() const {return true;}
+
protected:
mutable const opengm::learning::Weights<T> * weights_;
L numLabels_;
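The two new overloads let generic inference code detect that the learnable Potts function has Potts structure: its value depends only on whether the labels agree, not on which labels they are. A toy, stand-alone illustration of that property in numpy (not opengm API; is_potts_table is a hypothetical helper):

    import numpy

    def is_potts_table(values):
        # a two-variable value table is Potts if every off-diagonal entry
        # shares one value and every diagonal entry shares another
        values = numpy.asarray(values)
        diag = numpy.diag(values)
        off = values[~numpy.eye(values.shape[0], dtype=bool)]
        return bool(numpy.all(diag == diag[0]) and numpy.all(off == off[0]))

    beta = 0.5
    table = numpy.full((3, 3), beta)   # cost beta whenever the labels differ
    numpy.fill_diagonal(table, 0.0)    # cost 0 whenever the labels agree
    print(is_potts_table(table))       # True -- the property isPotts() reports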
diff --git a/include/opengm/learning/gradient-accumulator.hxx b/include/opengm/learning/gradient-accumulator.hxx
index 601a68f..f605122 100644
--- a/include/opengm/learning/gradient-accumulator.hxx
+++ b/include/opengm/learning/gradient-accumulator.hxx
@@ -84,7 +84,8 @@ struct FeatureAccumulator{
: accWeights_(nW),
gtLabel_(),
mapLabel_(),
- add_(add)
+ add_(add),
+ weight_(1.0)
{
for(size_t i=0; i<accWeights_.size(); ++i){
@@ -132,15 +133,15 @@ struct FeatureAccumulator{
if(add_){
// for gt label
- accWeights_[gwi] += f.weightGradient(wi, AccessorIter(accessorGt, 0));
+ accWeights_[gwi] += weight_*f.weightGradient(wi, AccessorIter(accessorGt, 0));
// for test label
- accWeights_[gwi] -= f.weightGradient(wi, AccessorIter(accessorMap, 0));
+ accWeights_[gwi] -= weight_*f.weightGradient(wi, AccessorIter(accessorMap, 0));
}
else{
// for gt label
- accWeights_[gwi] -= f.weightGradient(wi, AccessorIter(accessorGt, 0));
+ accWeights_[gwi] -= weight_*f.weightGradient(wi, AccessorIter(accessorGt, 0));
// for test label
- accWeights_[gwi] += f.weightGradient(wi, AccessorIter(accessorMap, 0));
+ accWeights_[gwi] += weight_*f.weightGradient(wi, AccessorIter(accessorMap, 0));
}
}
}
@@ -155,11 +156,12 @@ struct FeatureAccumulator{
void accumulateModelFeatures(
const GM & gm,
const LABEL_ITER & gtLabel,
- const LABEL_ITER & mapLabel
+ const LABEL_ITER & mapLabel,
+ const double weight = 1.0
){
gtLabel_ = gtLabel;
mapLabel_ = mapLabel;
-
+ weight_ = weight;
// iterate over all factors
// and accumulate features
for(size_t fi=0; fi<gm.numberOfFactors(); ++fi){
@@ -170,6 +172,7 @@ struct FeatureAccumulator{
LABEL_ITER gtLabel_;
LABEL_ITER mapLabel_;
bool add_;
+ double weight_;
};
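The new weight argument scales a model's signed gradient contribution before it enters the accumulator, so a caller can down-weight individual training configurations. A stand-alone numpy sketch of the accumulation logic above (accumulate is a hypothetical mirror of FeatureAccumulator, not opengm API):

    import numpy

    def accumulate(acc, grad_gt, grad_map, add=True, weight=1.0):
        # add the (weighted) gradient of the ground-truth labeling and
        # subtract that of the MAP labeling, or the reverse when add is False
        sign = 1.0 if add else -1.0
        acc += sign * weight * (numpy.asarray(grad_gt) - numpy.asarray(grad_map))
        return acc

    acc = numpy.zeros(3)
    accumulate(acc, grad_gt=[1.0, 0.0, 2.0], grad_map=[0.5, 0.5, 0.5],
               add=True, weight=0.25)
    print(acc)  # [ 0.125 -0.125  0.375]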
diff --git a/include/opengm/learning/subgradient_ssvm.hxx b/include/opengm/learning/subgradient_ssvm.hxx
index 8efdc11..1f6fa97 100644
--- a/include/opengm/learning/subgradient_ssvm.hxx
+++ b/include/opengm/learning/subgradient_ssvm.hxx
@@ -12,6 +12,8 @@
#include <omp.h>
+
+
namespace opengm {
namespace learning {
@@ -38,8 +40,7 @@ namespace opengm {
enum LearningMode{
Online = 0,
- Batch = 1,
- WorkingSets = 2
+ Batch = 1
};
@@ -237,54 +238,6 @@ namespace opengm {
}
}
- else if(para_.learningMode_ == Parameter::WorkingSets){
-
- //std::cout<<"working sets mode\n";
- std::vector< std::vector< std::vector<LabelType> > > A(nModels);
-
- RandomUniform<size_t> randModel(0, nModels);
-
-
- for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
- // this
- const size_t gmi = randModel();
-
- // lock the model
- dataset_.lockModel(gmi);
- const GMWITHLOSS & gmWithLoss = dataset_.getModelWithLoss(gmi);
-
- // do inference
- std::vector<LabelType> arg;
- opengm::infer<InfLossGm>(gmWithLoss, infLossGmParam, arg);
-
- A[gmi].push_back(arg);
-
- // accumulate all features
- featureAcc_.resetWeights();
- size_t aWithLoss = 0;
- for(size_t jj=0; jj<A[gmi].size(); ++jj){
- if(dataset_.getLoss(A[gmi][jj], gmi)>0.0){
- ++aWithLoss;
- featureAcc_.accumulateModelFeatures(dataset_.getModel(gmi), dataset_.getGT(gmi).begin(), A[gmi][jj].begin());
- }
- }
- if(aWithLoss>0){
- for(size_t wi=0; wi<nWegihts; ++wi){
- const double n = (para_.learningRate_/double(iteration_+1));
- const double wOld = dataset_.getWeights().getWeight(wi);
- const double wNew = wOld - n*featureAcc_.getWeight(wi)/double(aWithLoss);
- dataset_.getWeights().setWeight(wi, wNew);
- }
- }
- if(iteration_%nModels*2 == 0 ){
- std::cout << '\r'
- << std::setw(6) << std::setfill(' ') << iteration_ << ':'
- << std::setw(8) << dataset_. template getTotalLossParallel<INF>(para)<<" "<< std::flush;
-
- }
- dataset_.unlockModel(gmi);
- }
- }
weights_ = dataset_.getWeights();
}
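The deleted WorkingSets branch grew, per model, a working set A of every labeling inference had returned, averaged the feature gradient over the members that still incur positive loss, and took a step with rate decayed as learningRate/(t+1). A condensed stand-alone sketch of that update (hypothetical names; loss and feature_gradient stand in for the dataset and accumulator calls):

    import numpy

    def working_set_step(w, working_set, loss, feature_gradient,
                         learning_rate, iteration):
        # one step of the removed mode: average the gradient over
        # working-set labelings with positive loss, then take a decayed
        # subgradient step  w <- w - eta/(t+1) * g / |A+|
        violated = [a for a in working_set if loss(a) > 0.0]
        if not violated:
            return w
        g = sum(feature_gradient(a) for a in violated)
        eta = learning_rate / float(iteration + 1)
        return w - eta * g / float(len(violated))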
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index 87e1914..4a88903 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -210,15 +210,13 @@ def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000,
learningModeEnum = SubgradientSSVM_FlexibleLossParameter_LearningMode
lm = None
- if learningMode not in ['online','batch','workingSets']:
+ if learningMode not in ['online','batch']:
raise RuntimeError("wrong learning mode, must be 'online' or 'batch'")
if learningMode == 'online':
lm = learningModeEnum.online
if learningMode == 'batch':
lm = learningModeEnum.batch
- if learningMode == 'workingSets':
- lm = learningModeEnum.workingSets
param = learnerParamCls()
param.eps = float(eps)
param.maxIterations = int(maxIterations)
@@ -227,6 +225,7 @@ def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000,
param.C = float(C)
param.learningMode = lm
param.averaging = int(averaging)
+ param.nConf = int(nConf)
learner = learnerCls(dataset, param)
return learner
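After this change the wrapper accepts only 'online' and 'batch' and forwards the new nConf parameter to the C++ learner. A usage sketch mirroring the commented-out call in toy_dataset.py above (dataset, infCls and param as constructed there; the values are illustrative):

    from opengm import learning

    learner = learning.subgradientSSVM(dataset,
                                       learningRate=0.5,
                                       C=100,
                                       learningMode='batch',  # or 'online'
                                       maxIterations=200,
                                       averaging=-1,
                                       nConf=2)               # forwarded to param.nConf
    learner.learn(infCls=infCls, parameter=param,
                  connectedComponents=True, infMode='n')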
diff --git a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx b/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
index 11f7d1a..f908a02 100644
--- a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
+++ b/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
@@ -44,7 +44,6 @@ namespace opengm{
bp::enum_<typename PyLearnerParam::LearningMode>(paramEnumLearningModeName.c_str())
.value("online", PyLearnerParam::Online)
.value("batch", PyLearnerParam::Batch)
- .value("workingSets", PyLearnerParam::WorkingSets)
;
// learner param
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git