[opengm] 201/386: fixed bugs...
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:40 UTC 2016
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch debian/master
in repository opengm.
commit 48482cdf4ed854994912b6c6e764a2e7f52c85ea
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date: Mon Jan 5 18:29:54 2015 +0100
fixed bugs...
---
fubar/real_example_2.py | 4 +-
include/opengm/functions/learnable/lunary.hxx | 12 +++-
include/opengm/graphicalmodel/weights.hxx | 29 +++++----
include/opengm/learning/structured_perceptron.hxx | 22 -------
include/opengm/learning/subgradient_ssvm.hxx | 36 ++++++++---
include/opengm/python/numpyview.hxx | 7 ++-
src/interfaces/python/opengm/learning/__init__.py | 8 ++-
.../python/opengm/learning/pyLFunctionGen.cxx | 73 +++++++++++++++++-----
.../python/opengm/opengmcore/__init__.py | 12 +++-
.../python/opengm/opengmcore/opengmcore.cpp | 34 ++++++++++
10 files changed, 169 insertions(+), 68 deletions(-)
diff --git a/fubar/real_example_2.py b/fubar/real_example_2.py
index 361b4e3..3429fad 100644
--- a/fubar/real_example_2.py
+++ b/fubar/real_example_2.py
@@ -7,7 +7,7 @@ import pylab
nModels = 20
nLables = 2
-shape = [40, 40]
+shape = [200, 200]
numVar = shape[0]*shape[1]
sSmooth = [1.0,1.1,1.2, 1.5, 2.0, 3.0, 4.0]
@@ -132,7 +132,7 @@ nTestPoints =numpy.ones(nWeights).astype('uint64')*5
#learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
#learner = learning.maxLikelihoodLearner(dataset)
#learner = learning.structPerceptron(dataset, decayExponent=-0.5, learningMode='batch')
-learner = learning.subgradientSSVM(dataset, learningRate=1.0, C=100, learningMode='batch')
+learner = learning.subgradientSSVM(dataset, learningRate=1.0, C=100)
learner.learn(infCls=opengm.inference.QpboExternal,
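Note: the remaining arguments of this learner.learn(...) call are cut off in the hunk above. The net effect of the change is that the example now relies on the new default learning mode (Batch, see the subgradient_ssvm.hxx hunk below) instead of passing learningMode='batch' explicitly. A minimal sketch of the revised setup, assuming dataset is built as in the rest of real_example_2.py:

    from opengm import learning

    # equivalent to the removed call with learningMode='batch', because the
    # default learning mode of the subgradient SSVM learner is now Batch
    learner = learning.subgradientSSVM(dataset, learningRate=1.0, C=100)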
diff --git a/include/opengm/functions/learnable/lunary.hxx b/include/opengm/functions/learnable/lunary.hxx
index 89b8c93..ded00c2 100644
--- a/include/opengm/functions/learnable/lunary.hxx
+++ b/include/opengm/functions/learnable/lunary.hxx
@@ -265,14 +265,24 @@ LUnary<T, I, L>::operator()
ITERATOR begin
) const {
-
+ //std::cout<<"LUnary::operator()\n";
+ //OPENGM_CHECK_OP( int(weights_==NULL),==,int(false),"foo");
T val = 0;
const LabelType l(*begin);
const size_t nwForL = numWeightsForL(l);
+ //std::cout<<"nw for l "<<nwForL<<"\n";
+ //std::cout<<"wsize "<<weights_->size()<<"\n";
+
for(size_t i=0; i<nwForL; ++i){
+ //std::cout<<" i "<<i<<"\n";
+ //OPENGM_CHECK_OP(weightIdOffset(l)+i,<,weightIds_.size(),"foo");
+ //OPENGM_CHECK_OP(featureOffset(l)+i,<,features_.size(),"foo");
const size_t wi = weightIds_[weightIdOffset(l)+i];
+ //OPENGM_CHECK_OP(wi,<,weights_->size(),"foo");
+
val += weights_->getWeight(wi) * features_[featureOffset(l)+i];
}
+    //std::cout<<"LUnary::return operator()\n";
return val;
}
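The instrumented operator() evaluates a learned unary function: for a label l it forms the dot product between that label's feature slice and the shared weights it references. A minimal Python sketch of the same computation (all names chosen for illustration):

    def lunary_value(weights, weight_ids, features,
                     weight_id_offset, feature_offset, n_weights_for_l):
        # val = sum_i weights[weightIds[offset + i]] * features[featOffset + i]
        val = 0.0
        for i in range(n_weights_for_l):
            wi = weight_ids[weight_id_offset + i]   # global weight index
            val += weights[wi] * features[feature_offset + i]
        return val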
diff --git a/include/opengm/graphicalmodel/weights.hxx b/include/opengm/graphicalmodel/weights.hxx
index f8e6438..948ab68 100644
--- a/include/opengm/graphicalmodel/weights.hxx
+++ b/include/opengm/graphicalmodel/weights.hxx
@@ -7,41 +7,44 @@ namespace opengm{
namespace learning{
template<class T>
- class Weights : public marray::Vector<T>
+ class Weights
{
public:
typedef T ValueType;
Weights(const size_t numberOfWeights=0)
- : marray::Vector<T>(numberOfWeights){
+ : weights_(numberOfWeights)
+ {
}
ValueType getWeight(const size_t pi)const{
OPENGM_ASSERT_OP(pi,<,weights_.size());
- return (*this)[pi];
- //return weights_[pi];
+ return weights_[pi];
}
void setWeight(const size_t pi,const ValueType value){
OPENGM_ASSERT_OP(pi,<,weights_.size());
- (*this)[pi] = value;
+ weights_[pi] = value;
}
- //const ValueType& operator[](const size_t pi)const{
- // return weights_[pi];
- //}
- //ValueType& operator[](const size_t pi) {
- // return weights_[pi];
- //}
+ const ValueType& operator[](const size_t pi)const{
+ return weights_[pi];
+ }
+ ValueType& operator[](const size_t pi) {
+ return weights_[pi];
+ }
size_t numberOfWeights()const{
- return this->size();
+ return weights_.size();
+ }
+ size_t size()const{
+ return weights_.size();
}
private:
- //std::vector<ValueType> weights_;
+ std::vector<ValueType> weights_;
};
} // namespace learning
} // namespace opengm
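The Weights class now owns a private std::vector<T> instead of inheriting from marray::Vector<T>, re-exposing only the accessors the learning code uses. A Python mirror of the resulting interface, purely for clarity (not part of the bindings):

    class Weights:
        def __init__(self, number_of_weights=0):
            self._weights = [0.0] * number_of_weights  # std::vector<ValueType> weights_
        def get_weight(self, pi):                      # getWeight(pi)
            assert pi < len(self._weights)
            return self._weights[pi]
        def set_weight(self, pi, value):               # setWeight(pi, value)
            assert pi < len(self._weights)
            self._weights[pi] = value
        def __getitem__(self, pi):                     # operator[](pi)
            return self._weights[pi]
        def __len__(self):                             # size() / numberOfWeights()
            return len(self._weights)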
diff --git a/include/opengm/learning/structured_perceptron.hxx b/include/opengm/learning/structured_perceptron.hxx
index 8564aa2..09b685f 100644
--- a/include/opengm/learning/structured_perceptron.hxx
+++ b/include/opengm/learning/structured_perceptron.hxx
@@ -151,28 +151,6 @@ namespace opengm {
featureAcc_.resetWeights();
- //std::vector< std::vector<LabelType> > args(nModels);
- //#pragma omp parallel for
- //for(size_t gmi=0; gmi<nModels; ++gmi)
- //{
- // int tid = omp_get_thread_num();
- // std::cout<<"Hello World from thread"<<tid<<"\n";
-//
- // dataset_.lockModel(gmi);
- // opengm::infer<INF>(dataset_.getModel(gmi), para, args[gmi]);
- // dataset_.unlockModel(gmi);
- //}
-//
- //for(size_t gmi=0; gmi<nModels; ++gmi)
- //{
- // dataset_.lockModel(gmi);
- // featureAcc_.accumulateModelFeatures(dataset_.getModel(gmi),
- // dataset_.getGT(gmi).begin(),
- // args[gmi].begin());
- // dataset_.unlockModel(gmi);
- //}
-
-
omp_lock_t modelLockUnlock;
omp_init_lock(&modelLockUnlock);
diff --git a/include/opengm/learning/subgradient_ssvm.hxx b/include/opengm/learning/subgradient_ssvm.hxx
index 592f53a..1427621 100644
--- a/include/opengm/learning/subgradient_ssvm.hxx
+++ b/include/opengm/learning/subgradient_ssvm.hxx
@@ -46,7 +46,7 @@ namespace opengm {
stopLoss_ = 0.0;
learningRate_ = 1.0;
C_ = 1.0;
- learningMode_ = Online;
+ learningMode_ = Batch;
}
double eps_;
@@ -78,6 +78,20 @@ namespace opengm {
}
}
+ double getLoss(const GMType & gm ,const GMWITHLOSS & gmWithLoss, std::vector<LabelType> & labels){
+
+ double loss = 0 ;
+ std::vector<LabelType> subConf(20,0);
+
+ for(size_t fi=gm.numberOfFactors(); fi<gmWithLoss.numberOfFactors(); ++fi){
+ for(size_t v=0; v<gmWithLoss[fi].numberOfVariables(); ++v){
+ subConf[v] = labels[ gmWithLoss[fi].variableIndex(v)];
+ }
+ loss += gmWithLoss[fi](subConf.begin());
+ }
+ return loss;
+ }
+
private:
double updateWeights();
@@ -122,7 +136,7 @@ namespace opengm {
std::cout<<"online mode\n";
for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
- if(iteration_%nModels==0){
+ if(iteration_%(nModels*10)==0){
std::cout<<"loss :"<<dataset_. template getTotalLoss<INF>(para)<<"\n";
}
@@ -149,9 +163,7 @@ namespace opengm {
std::cout<<"batch mode\n";
for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
// this
- if(iteration_%1==0){
- std::cout<<"loss :"<<dataset_. template getTotalLoss<INF>(para)<<"\n";
- }
+
// reset the weights
featureAcc_.resetWeights();
@@ -164,8 +176,8 @@ namespace opengm {
omp_lock_t featureAccLock;
omp_init_lock(&featureAccLock);
-
- #pragma omp parallel for
+ double totalLoss = 0;
+ #pragma omp parallel for reduction(+:totalLoss)
for(size_t gmi=0; gmi<nModels; ++gmi)
{
@@ -177,14 +189,16 @@ namespace opengm {
const GMWITHLOSS & gmWithLoss = dataset_.getModelWithLoss(gmi);
+ const GMType & gm = dataset_.getModel(gmi);
//run inference
std::vector<LabelType> arg;
opengm::infer<InfLossGm>(gmWithLoss, infLossGmParam, arg);
+ totalLoss = totalLoss + getLoss(gm, gmWithLoss, arg);
//
FeatureAcc featureAcc(nWegihts);
- featureAcc.accumulateModelFeatures(dataset_.getModel(gmi), dataset_.getGT(gmi).begin(), arg.begin());
+ featureAcc.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
// acc features
@@ -197,10 +211,12 @@ namespace opengm {
dataset_.unlockModel(gmi);
omp_unset_lock(&modelLockUnlock);
}
-
+ if(iteration_%1==0){
+ std::cout<<"loss :"<< -1.0*totalLoss <<"\n";
+ }
// update the weights
const double wChange =updateWeights();
-
+
}
}
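Two changes land in this file: the default learning mode flips from Online to Batch, and the batch loop now accumulates the training loss with the new getLoss helper (via an OpenMP reduction) instead of calling getTotalLoss every iteration. getLoss relies on the layout of the loss-augmented model: gmWithLoss holds the original model's factors first and the appended loss factors after them, so summing factor values from gm.numberOfFactors() upward evaluates only the loss terms; note the fixed subConf buffer assumes a loss factor touches at most 20 variables. A Python sketch of that accounting, assuming an opengm-like factor API:

    def get_loss(gm, gm_with_loss, labels):
        # sum only the loss factors appended after the model's own factors
        loss = 0.0
        for fi in range(gm.numberOfFactors(), gm_with_loss.numberOfFactors()):
            factor = gm_with_loss[fi]
            sub_conf = [labels[factor.variableIndex(v)]
                        for v in range(factor.numberOfVariables())]
            loss += factor(sub_conf)  # evaluate the loss factor at the labeling
        return loss

The printout negates totalLoss, consistent with the loss factors entering the loss-augmented energy with a flipped sign so that energy minimization performs loss-augmented inference.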
diff --git a/include/opengm/python/numpyview.hxx b/include/opengm/python/numpyview.hxx
index 8aae5e8..1be7c44 100644
--- a/include/opengm/python/numpyview.hxx
+++ b/include/opengm/python/numpyview.hxx
@@ -31,7 +31,8 @@ public:
typedef typename marray::View< V ,false >::const_iterator ConstIteratorType;
typedef size_t const * ShapeIteratorType;
- NumpyView( ):allocFromCpp_(false){
+ NumpyView():allocFromCpp_(false){
+
}
NumpyView( boost::python::object obj):allocFromCpp_(false){
boost::python::numeric::array array = boost::python::extract<boost::python::numeric::array > (obj);
@@ -189,6 +190,10 @@ public:
return new_view;
}
+ marray::View< V ,false > view()const{
+ return view_;
+ }
+
//boost::python::object arrayObject()const{
// return arrayObj_;
//};
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index e067465..6114e65 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -255,9 +255,13 @@ def lUnaryFunctions(weights,numberOfLabels, features, weightIds,
addConstFeature = bool(addConstFeature)
)
- res.__dict__['_features_'] = weights
- res.__dict__['_weights_'] = features
+    res.__dict__['_features_'] = features
+ res.__dict__['_ff_'] = ff
+ res.__dict__['_weights_'] = weights
+
return res
+ else :
+ raise RuntimeError("noy yet implemented")
def lPottsFunctions(weights, numberOfLabels, features, weightIds,
addConstFeature = False):
diff --git a/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx b/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx
index 2028251..03fde2b 100644
--- a/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx
+++ b/src/interfaces/python/opengm/learning/pyLFunctionGen.cxx
@@ -44,7 +44,7 @@ namespace opengm{
weights_(weights),
numFunctions_(numFunctions),
numLabels_(numLabels),
- features_(features),
+ features_(features.view()),
weightIds_(weightIds.begin(), weightIds.end()),
addConstFeature_(addConstFeature)
{
@@ -86,7 +86,7 @@ namespace opengm{
WeightType & weights_;
size_t numFunctions_;
size_t numLabels_;
- op::NumpyView<ValueType, 2> features_;
+ marray::Marray<ValueType> features_;
std::vector<size_t> weightIds_;
bool addConstFeature_;
};
@@ -108,8 +108,8 @@ namespace opengm{
WeightType & weights,
const size_t numFunctions,
const size_t numLabels,
- op::NumpyView<ValueType, 2> features,
- op::NumpyView<IndexType, 2> weightIds,
+ op::NumpyView<ValueType, 2> & features,
+ op::NumpyView<IndexType, 2> & weightIds,
const bool makeFirstEntryConst,
const bool addConstFeature
):
@@ -117,19 +117,47 @@ namespace opengm{
weights_(weights),
numFunctions_(numFunctions),
numLabels_(numLabels),
- features_(features),
- weightIds_(weightIds),
+ features_(features.view()),
+ //weightIds_(weightIds),
makeFirstEntryConst_(makeFirstEntryConst),
addConstFeature_(addConstFeature)
{
+ //std::cout<<"constructor\n";
+
+ //std::cout<<" features (1000,1)"<<features(1000,1)<<"\n";
+ //std::cout<<" features_(1000,1)"<<features_(1000,1)<<"\n";
OPENGM_CHECK_OP(features.shape(0), == , numFunctions, "wrong shape");
OPENGM_CHECK_OP(weightIds.shape(1), == , features.shape(1) + int(addConstFeature), "wrong shape");
OPENGM_CHECK_OP(weightIds.shape(0)+int(makeFirstEntryConst), == ,numLabels, "wrong shape");
+
+
+ const size_t nFeat =features_.shape(1);
+ const size_t nWPerL = nFeat+int(addConstFeature_);
+ const size_t wShape[2] = {numLabels_- int(makeFirstEntryConst_) ,nWPerL};
+
+ wIds_ = marray::Marray<size_t>(wShape, wShape+2);
+
+ //std::cout<<"assignment\n";
+ //std::cout<<"passed wi shape "<<weightIds.shape(0)<<" "<<weightIds.shape(1)<<" given "<<wShape[0]<<" "<<wShape[1]<<"\n";
+ //std::cout<<"wIds_ shape "<<wIds_.shape(0)<<" "<<wIds_.shape(1)<<"\n";
+
+ for(size_t ll=0; ll<wShape[0]; ++ll){
+ for(size_t wi=0; wi<wShape[1]; ++wi){
+ //std::cout<<"ll "<<ll<<" wi "<<wi<<"\n";
+ size_t passed = weightIds(ll,wi);
+ //std::cout<<"passed "<<passed<<"\n";
+ wIds_(ll,wi) = passed;
+ }
+ }
+ //std::cout<<"constructor done\n";
}
template<class GM>
std::vector< typename GM::FunctionIdentifier > * addFunctionsGeneric(GM & gm)const{
+ //std::cout<<"&** features_(1000,1)"<<features_(1000,1)<<"\n";
+
+
typedef typename GM::FunctionIdentifier Fid;
typedef std::vector<Fid> FidVector;
@@ -145,21 +173,29 @@ namespace opengm{
const size_t wShape[2] = {numLabels_- int(makeFirstEntryConst_) ,nWPerL};
marray::Marray<size_t> _weightIds(wShape, wShape+2);
- for(size_t ll=0; ll<wShape[0]; ++ll)
- for(size_t wi=0; wi<wShape[1]; ++wi){
- _weightIds(ll,wi) = weightIds_(ll,wi);
- }
+ //for(size_t ll=0; ll<wShape[0]; ++ll)
+ //for(size_t wi=0; wi<wShape[1]; ++wi){
+ // _weightIds(ll,wi) = weightIds_(ll,wi);
+ //}
for(size_t i=0;i<numFunctions_;++i){
// copy the features for that instance
for(size_t f=0; f<nFeat; ++f){
+ //std::cout<<"added feat:"<<features_(i,f)<<"\n";
fFeat(f) = features_(i,f);
}
if(addConstFeature_){
fFeat(nFeat) = 1.0;
}
- FType(weights_, numLabels_, _weightIds, fFeat, makeFirstEntryConst_);
+ FType f(weights_, numLabels_, wIds_, fFeat, makeFirstEntryConst_);
+
+ //std::cout<<"INTERNAL TEST\n";
+ //for(size_t l=0;l<numLabels_; ++l){
+ // std::cout<<"l "<<l<<" f(l) = "<<f(&l)<<"\n";
+ //}
+
+ (*fidVector)[i] = gm.addFunction(f);
}
return fidVector;
}
@@ -175,16 +211,19 @@ namespace opengm{
WeightType & weights_;
size_t numFunctions_;
size_t numLabels_;
- op::NumpyView<ValueType, 2> features_;
+
+ marray::Marray<ValueType> features_;
+ //op::NumpyView<ValueType, 2> features_;
op::NumpyView<IndexType, 2> weightIds_;
bool makeFirstEntryConst_;
bool addConstFeature_;
+ marray::Marray<size_t> wIds_;
};
template<class GM_ADDER,class GM_MULT>
FunctionGeneratorBase<GM_ADDER,GM_MULT> * lunarySharedFeatFunctionGen(
- ol::Weights<typename GM_ADDER::ValueType> weights,
+ ol::Weights<typename GM_ADDER::ValueType> & weights,
const size_t numFunctions,
const size_t numLabels,
opengm::python::NumpyView<typename GM_ADDER::ValueType,2> features,
@@ -202,7 +241,7 @@ namespace opengm{
template<class GM_ADDER,class GM_MULT>
FunctionGeneratorBase<GM_ADDER,GM_MULT> * lpottsFunctionGen(
- ol::Weights<typename GM_ADDER::ValueType> weights,
+ ol::Weights<typename GM_ADDER::ValueType> & weights,
const size_t numFunctions,
const size_t numLabels,
opengm::python::NumpyView<typename GM_ADDER::ValueType,2> features,
@@ -229,7 +268,8 @@ namespace opengm{
void export_lfunction_generator(){
typedef LPottsFunctionGen<GM_ADDER, GM_MULT> FGen;
- bp::def("_lpottsFunctionsGen",&lpottsFunctionGen<GM_ADDER,GM_MULT>,bp::return_value_policy<bp::manage_new_object>(),
+ bp::def("_lpottsFunctionsGen",&lpottsFunctionGen<GM_ADDER,GM_MULT>,
+ bp::return_value_policy<bp::manage_new_object>(),
(
bp::arg("weights"),
bp::arg("numFunctions"),
@@ -240,7 +280,8 @@ namespace opengm{
)
);
- bp::def("_lunarySharedFeatFunctionsGen",&lunarySharedFeatFunctionGen<GM_ADDER,GM_MULT>,bp::return_value_policy<bp::manage_new_object>(),
+ bp::def("_lunarySharedFeatFunctionsGen",&lunarySharedFeatFunctionGen<GM_ADDER,GM_MULT>,
+ bp::with_custodian_and_ward_postcall<0, 4, bp::return_value_policy<bp::manage_new_object> >(),
(
bp::arg("weights"),
bp::arg("numFunctions"),
diff --git a/src/interfaces/python/opengm/opengmcore/__init__.py b/src/interfaces/python/opengm/opengmcore/__init__.py
index f7a8ef8..909641f 100644
--- a/src/interfaces/python/opengm/opengmcore/__init__.py
+++ b/src/interfaces/python/opengm/opengmcore/__init__.py
@@ -1,4 +1,5 @@
from _opengmcore import *
+from _opengmcore import _gridVis2d
from factorSubset import FactorSubset
from gm_injector import _extend_gm_classes
from factor_injector import _extend_factor_classes
@@ -191,7 +192,16 @@ class Multiplier:
def neutral(self):
return float(1.0)
-
+
+def gridVis(shape, numpyOrder=True):
+ assert len(shape) == 2
+ nFac = (shape[0]-1)*shape[1] + (shape[1]-1)*shape[0]
+ out = numpy.ones([nFac,2], dtype=index_type)
+ _gridVis2d(shape[0],shape[1],numpyOrder, out)
+ return out
+
+
+
#Model generators
def grid2d2Order(unaries,regularizer,order='numpy',operator='adder'):
"""
diff --git a/src/interfaces/python/opengm/opengmcore/opengmcore.cpp b/src/interfaces/python/opengm/opengmcore/opengmcore.cpp
index e6c8689..a9cb5fa 100644
--- a/src/interfaces/python/opengm/opengmcore/opengmcore.cpp
+++ b/src/interfaces/python/opengm/opengmcore/opengmcore.cpp
@@ -278,6 +278,37 @@ GM * pyPottsModel3d(
}
+
+
+void gridVis2d(
+ const size_t dx,
+ const size_t dy,
+ const bool numpyOrder,
+ opengm::python::NumpyView< opengm::python::GmIndexType, 2> visarray
+){
+ size_t shape[2]={dx,dy};
+ CoordToVi toVi(shape,shape+2,numpyOrder);
+
+ size_t c=0;
+
+ for(size_t x=0; x<dx;++x)
+ for(size_t y=0; y<dy;++y){
+
+ if(x+1<dx){
+ visarray(c,0) = toVi(x,y);
+ visarray(c,1) = toVi(x+1,y);
+ ++c;
+ }
+ if(y+1<dy){
+ visarray(c,0) = toVi(x,y);
+ visarray(c,1) = toVi(x,y+1);
+ ++c;
+ }
+ }
+}
+
+
+
void makeMaskedState(
opengm::python::NumpyView< opengm::UInt32Type, 3> mask,
opengm::python::NumpyView< opengm::UInt64Type, 1> arg,
@@ -681,6 +712,9 @@ BOOST_PYTHON_MODULE_INIT(_opengmcore) {
}
+
+ boost::python::def("_gridVis2d",&gridVis2d);
+
//export_rag();
export_config();
export_vectors<opengm::python::GmIndexType>();
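For reference, the enumeration gridVis2d performs is simply: for every cell, emit an edge to its neighbour along x and along y, translating coordinates to variable indices with CoordToVi (row- or column-major depending on numpyOrder). A pure-Python equivalent, with to_vi standing in for that functor:

    def grid_vis_2d(dx, dy, to_vi):
        pairs = []
        for x in range(dx):
            for y in range(dy):
                if x + 1 < dx:   # neighbour along x
                    pairs.append((to_vi(x, y), to_vi(x + 1, y)))
                if y + 1 < dy:   # neighbour along y
                    pairs.append((to_vi(x, y), to_vi(x, y + 1)))
        return pairs

    # e.g. row-major indexing for a 3-by-3 grid:
    # grid_vis_2d(3, 3, lambda x, y: x * 3 + y)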