[opengm] 267/386: learning changes

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:08 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit f71316bdcd910b13c67d4263600116954db562f7
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Tue Mar 10 16:37:32 2015 +0100

    learning changes
---
 fubar/brown_horse_sp.py                            |   6 +-
 fubar/noisy_squares_3_labels.py                    |  22 +-
 include/opengm/inference/multicut.hxx              |   8 +-
 include/opengm/learning/rws.hxx                    | 286 +++++++++++++++++++++
 .../python/opengm/learning/CMakeLists.txt          |   1 +
 src/interfaces/python/opengm/learning/__init__.py  |  23 +-
 src/interfaces/python/opengm/learning/helper.hxx   |  10 +-
 src/interfaces/python/opengm/learning/learning.cxx |   5 +-
 src/interfaces/python/opengm/learning/pyRws.cxx    |  72 ++++++
 9 files changed, 407 insertions(+), 26 deletions(-)

diff --git a/fubar/brown_horse_sp.py b/fubar/brown_horse_sp.py
index 275be00..1d1579b 100644
--- a/fubar/brown_horse_sp.py
+++ b/fubar/brown_horse_sp.py
@@ -22,7 +22,7 @@ imgPath = dsetRoot + 'rgb/'
 gtBasePath = dsetRoot + 'figure_ground/'
 
 imgFiles = glob.glob(imgPath+'*.jpg')
-takeNth = 1
+takeNth = 2
 
 imgs = []
 sps = []
@@ -33,7 +33,7 @@ pbar = getPbar(len(imgFiles), 'Load Image')
 pbar.start()
 for i,path in enumerate(imgFiles):
 
-    if i>50 :
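+    # only load the first 21 images (was 51)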
+    if i>20 :
         break
     gtPath =  gtBasePath + os.path.basename(path)
     rgbImg  = vigra.impex.readImage(path)
@@ -172,7 +172,7 @@ dataset,test_set = superpixelDataset(imgs=imgs,sps=sps, gts=gts, numberOfLabels=
 
 
 learner =  learning.subgradientSSVM(dataset, learningRate=0.1, C=0.1, 
-                                    learningMode='batch',maxIterations=1000, averaging=-1)
+                                    learningMode='batch',maxIterations=2000, averaging=-1)
 
 
 #learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
diff --git a/fubar/noisy_squares_3_labels.py b/fubar/noisy_squares_3_labels.py
index acd70e1..75e3482 100644
--- a/fubar/noisy_squares_3_labels.py
+++ b/fubar/noisy_squares_3_labels.py
@@ -11,7 +11,7 @@ from opengm.learning import secondOrderImageDataset, getPbar
 numpy.random.seed(42)
 
 nImages = 8 
-shape = [30, 30]
+shape = [15, 15]
 noise = 2.0
 imgs = []
 gts = []
@@ -29,12 +29,12 @@ for i in range(nImages):
 
     gtImg = vigra.sampling.rotateImageDegree(gtImg.astype(numpy.float32),int(ra),splineOrder=0)
 
-    if i<2 :
+    if i<1 :
         vigra.imshow(gtImg)
         vigra.show()
 
     img = gtImg + numpy.random.random(shape)*float(noise)
-    if i<2 :
+    if i<1 :
         vigra.imshow(img)
         vigra.show()
 
@@ -97,16 +97,12 @@ lm = 0
 infCls = opengm.inference.TrwsExternal
 param = opengm.InfParam()
 
-if False:
-    print "construct learner"
-    learner = learning.maxLikelihoodLearner(dataset)
-    print "start to learn"
-    learner.learn()
-    print "exit"
 
-else:
-   learner =  learning.subgradientSSVM(dataset, learningRate=0.5, C=100, learningMode='batch',maxIterations=500,averaging=-1,nConf=0)
-   learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='n')
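+# two-stage training on the same dataset weights: first the subgradient
+# SSVM in batch mode, then the perturbation-based rws learner added in this commit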
+learner =  learning.subgradientSSVM(dataset, learningRate=1.0, C=0.9, learningMode='batch',maxIterations=200, averaging=-1)
+learner.learn(infCls=infCls,parameter=param,connectedComponents=False,infMode='n')
+
+learner =  learning.rws(dataset, learningRate=1.0, C=0.05,maxIterations=5000, p=20, sigma=2.0)
+learner.learn(infCls=infCls,parameter=param,connectedComponents=False,infMode='n')
 
 
 # predict on test test
@@ -122,4 +118,4 @@ for (rgbImg, gtImg, gm) in test_set :
 
     vigra.imshow(arg+2)
     vigra.show()
-
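+    # stop after the first test image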
+    break
diff --git a/include/opengm/inference/multicut.hxx b/include/opengm/inference/multicut.hxx
index 2fff090..bbe9780 100644
--- a/include/opengm/inference/multicut.hxx
+++ b/include/opengm/inference/multicut.hxx
@@ -1374,14 +1374,14 @@ Multicut<GM,ACC>::infer(VisitorType& mcv)
       throw RuntimeError("Error:  Model can not be solved!"); 
    }
    else if(!readWorkFlow(parameter_.workFlow_)){//Use given workflow if posible
-      std::cout << "Warning: can not parse workflow : " << parameter_.workFlow_ <<std::endl;
-      std::cout << "Using default workflow ";
+      //std::cout << "Warning: can not parse workflow : " << parameter_.workFlow_ <<std::endl;
+      //std::cout << "Using default workflow ";
       if(problemType_ == MWC){
-         std::cout << "(TTC)(MTC)(IC)(CC-IFD,TTC-I)" <<std::endl;
+         //std::cout << "(TTC)(MTC)(IC)(CC-IFD,TTC-I)" <<std::endl;
          readWorkFlow("(TTC)(MTC)(IC)(CC-IFD,TTC-I)");
       }
       else if(problemType_ == MC){
-         std::cout << "(CC-FDB)(IC)(CC-I)" <<std::endl;
+         //std::cout << "(CC-FDB)(IC)(CC-I)" <<std::endl;
          readWorkFlow("(CC-FDB)(IC)(CC-I)");
       }
       else{
diff --git a/include/opengm/learning/rws.hxx b/include/opengm/learning/rws.hxx
new file mode 100644
index 0000000..dadc60a
--- /dev/null
+++ b/include/opengm/learning/rws.hxx
@@ -0,0 +1,286 @@
+#pragma once
+#ifndef OPENGM_RWS_LEARNER_HXX
+#define OPENGM_RWS_LEARNER_HXX
+
+#include <iomanip>
+#include <vector>
+#include <opengm/inference/inference.hxx>
+#include <opengm/graphicalmodel/weights.hxx>
+#include <opengm/utilities/random.hxx>
+#include <opengm/learning/gradient-accumulator.hxx>
+#include <opengm/learning/weight_averaging.hxx>
+
+#ifdef WITH_OPENMP
+#include <omp.h>
+#endif
+
+#include <boost/circular_buffer.hpp>
+#include <boost/math/distributions/normal.hpp>
+#include <boost/random/normal_distribution.hpp>
+#include <boost/random/mersenne_twister.hpp>
+#include <boost/random/variate_generator.hpp>
+
+
+namespace opengm {
+    namespace learning {
+
+
+
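+    // helpers: draw one sample from a variate generator, or fill a
+    // vector with independent samples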
+    template<class T>
+    double gen_normal_3(T &generator)
+    {
+      return generator();
+    }
+
+    // Version that fills a vector
+    template<class T>
+    void gen_normal_3(T &generator,
+                  std::vector<double> &res)
+    {
+      for(size_t i=0; i<res.size(); ++i)
+        res[i]=generator();
+    }
+
+
+           
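+    // Rws: a stochastic, perturbation-based learner. Each iteration picks a
+    // random model, draws p_ Gaussian perturbations (std-dev sigma_) of the
+    // current weights, runs MAP inference for each perturbed weight vector,
+    // and forms a gradient estimate from the loss-weighted noise vectors
+    // (a zeroth-order / smoothed-gradient scheme).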
+    template<class DATASET>
+    class Rws
+    {
+    public: 
+        typedef DATASET DatasetType;
+        typedef typename DATASET::GMType   GMType; 
+        typedef typename DATASET::GMWITHLOSS GMWITHLOSS;
+        typedef typename DATASET::LossType LossType;
+        typedef typename GMType::ValueType ValueType;
+        typedef typename GMType::IndexType IndexType;
+        typedef typename GMType::LabelType LabelType; 
+        typedef opengm::learning::Weights<double> WeightsType;
+        typedef typename std::vector<LabelType>::const_iterator LabelIterator;
+        typedef FeatureAccumulator<GMType, LabelIterator> FeatureAcc;
+
+        typedef std::vector<LabelType> ConfType;
+        typedef boost::circular_buffer<ConfType> ConfBuffer;
+        typedef std::vector<ConfBuffer> ConfBufferVec;
+
+        class Parameter{
+        public:
+
+
+
+            Parameter(){
+                eps_ = 0.00001;
+                maxIterations_ = 10000;
+                stopLoss_ = 0.0;
+                learningRate_ = 1.0;
+                C_ = 1.0;
+                averaging_ = -1;
+                p_ = 10;
+                sigma_ = 1.0;
+            }       
+
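+            // p_     : number of perturbations evaluated per iteration
+            // sigma_ : std-dev of the Gaussian weight noise
+            // C_     : regularization constant (the L2 regularizer uses 1/C_)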
+            double eps_;
+            size_t maxIterations_;
+            double stopLoss_;
+            double learningRate_;
+            double C_;
+            int averaging_;
+            size_t p_;
+            double sigma_;
+        };
+
+
+        Rws(DATASET&, const Parameter& );
+
+        template<class INF>
+        void learn(const typename INF::Parameter& para); 
+        //template<class INF, class VISITOR>
+        //void learn(typename INF::Parameter para, VISITOR vis);
+
+        const opengm::learning::Weights<double>& getWeights(){return weights_;}
+        Parameter& getLearningParameters(){return para_;}
+
+
+
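+        // Sums the loss factors that GMWITHLOSS appends after the model's
+        // own factors; assumes no loss factor has more than 20 variables
+        // (the size of subConf below).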
+        double getLoss(const GMType & gm ,const GMWITHLOSS  & gmWithLoss, std::vector<LabelType> & labels){
+
+            double loss = 0 ;
+            std::vector<LabelType> subConf(20,0);
+
+            for(size_t fi=gm.numberOfFactors(); fi<gmWithLoss.numberOfFactors(); ++fi){
+                for(size_t v=0; v<gmWithLoss[fi].numberOfVariables(); ++v){
+                    subConf[v] = labels[ gmWithLoss[fi].variableIndex(v)];
+                }
+                loss +=  gmWithLoss[fi](subConf.begin());
+            }
+            return loss;
+        }
+
+    private:
+
+        double updateWeights();
+
+        DATASET& dataset_;
+        WeightsType  weights_;
+        Parameter para_;
+        size_t iteration_;
+        FeatureAcc featureAcc_;
+        WeightRegularizer<ValueType> wReg_;
+        WeightAveraging<double> weightAveraging_;
+    }; 
+
+    template<class DATASET>
+    Rws<DATASET>::Rws(DATASET& ds, const Parameter& p )
+    :   dataset_(ds), 
+        para_(p),
+        iteration_(0),
+        featureAcc_(ds.getNumberOfWeights()),
+        wReg_(2, 1.0/p.C_),
+        weightAveraging_(ds.getWeights(),p.averaging_)
+    {
+        featureAcc_.resetWeights();
+        weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
+    }
+
+
+    template<class DATASET>
+    template<class INF>
+    void Rws<DATASET>::learn(const typename INF::Parameter& para){
+
+
+        const size_t nModels = dataset_.getNumberOfModels();
+        const size_t nWeights = dataset_.getNumberOfWeights();
+
+        
+        //for(size_t wi=0; wi<nWeights; ++wi){
+        //    dataset_.getWeights().setWeight(wi, 0.0);
+        //}
+
+
+
+        RandomUniform<size_t> randModel(0, nModels);
+        std::vector< std::vector<ValueType> > noiseVecs(para_.p_, std::vector<ValueType>(nWeights));
+        std::vector<ValueType> lossVec(para_.p_);
+
+        std::vector<ValueType> gradient(nWeights);
+
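+        // N(0, sigma_) sampler used to perturb the weights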
+        boost::variate_generator<boost::mt19937, boost::normal_distribution<> >
+        generator(boost::mt19937(time(0)),boost::normal_distribution<>(0.0, para_.sigma_));
+
+        std::cout<<"online mode "<<nWegihts<<"\n";
+
+        std::cout <<"start loss"<< std::setw(6) << std::setfill(' ') << iteration_ << ':'
+                          << std::setw(8) << dataset_. template getTotalLossParallel<INF>(para) <<"  \n\n\n\n";
+
+
+        for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
+
+
+
+
+            // get random model
+            const size_t gmi = randModel();
+
+            // save the current weights
+            WeightsType currentWeights  = dataset_.getWeights();
+
+
+            featureAcc_.resetWeights();
+
+            // lock the model
+            dataset_.lockModel(gmi);
+
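+            // evaluate para_.p_ perturbed weight vectors on this model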
+            for(size_t p=0; p<para_.p_; ++p){
+
+
+                // fill noise 
+                gen_normal_3(generator, noiseVecs[p]);
+
+                // add noise to the weights
+                for(size_t wi=0; wi<nWeights; ++wi){
+                    const ValueType cw = currentWeights[wi];
+                    const ValueType nw = cw + noiseVecs[p][wi];
+                    dataset_.getWeights().setWeight(wi, nw);
+                }
+
+
+                const GMType & gm = dataset_.getModel(gmi);
+                // do inference
+                std::vector<LabelType> arg;
+                opengm::infer<INF>(gm, para, arg);
+                lossVec[p] = dataset_.getLoss(arg, gmi);
+                
+                featureAcc_.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
+                // update weights
+                updateWeights();
+            }
+
+            std::fill(gradient.begin(), gradient.end(), 0.0);
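+            // zeroth-order gradient estimate: average of the noise vectors,
+            // each weighted by the loss it produced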
+            for(size_t p=0; p<para_.p_; ++p){
+                for(size_t wi=0; wi<nWeights; ++wi){
+                    gradient[wi] += (1.0/para_.p_)*(noiseVecs[p][wi])*lossVec[p];
+                }
+            }
+
+            const ValueType actualLearningRate = para_.learningRate_/(1.0 + iteration_);
+            //const ValueType actualLearningRate = para_.learningRate_;///(1.0 + iteration_);
+            // do update
+            for(size_t wi=0; wi<nWeights; ++wi){
+                const ValueType oldWeight = currentWeights[wi];
+                const ValueType newWeight = (1.0 + oldWeight - actualLearningRate*gradient[wi])*para_.C_;
+                dataset_.getWeights().setWeight(wi, newWeight);
+            }
+            std::cout<<"\n";
+            dataset_.unlockModel(gmi);
+
+            if(iteration_%10==0){
+            //if(iteration_%nModels*2 == 0 ){
+                std::cout << '\n'
+                          << std::setw(6) << std::setfill(' ') << iteration_ << ':'
+                          << std::setw(8) << dataset_. template getTotalLossParallel<INF>(para) <<"  "<< std::flush;
+
+            }
+
+        }
+  
+        weights_ = dataset_.getWeights();
+    }
+
+
+    template<class DATASET>
+    double Rws<DATASET>::updateWeights(){
+
+        const size_t nWeights = dataset_.getNumberOfWeights();
+
+        WeightsType p(nWeights);
+        WeightsType newWeights(nWeights);
+
+
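+        // p = regularizer gradient (the current weights) plus the
+        // C-weighted accumulated feature differences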
+        for(size_t wi=0; wi<nWeights; ++wi){
+            p[wi] =  dataset_.getWeights().getWeight(wi);
+            p[wi] += para_.C_ * featureAcc_.getWeight(wi);
+        }
+
+
+        double wChange = 0.0;
+        
+        for(size_t wi=0; wi<nWeights; ++wi){
+            const double wOld = dataset_.getWeights().getWeight(wi);
+            const double wNew = wOld - (para_.learningRate_/double(iteration_+1))*p[wi];
+            newWeights[wi] = wNew;
+        }
+
+        weightAveraging_(newWeights);
+
+
+
+        weights_ = dataset_.getWeights();
+        return wChange;
+    }
+} // namespace learning
+} // namespace opengm
+#endif // OPENGM_RWS_LEARNER_HXX
diff --git a/src/interfaces/python/opengm/learning/CMakeLists.txt b/src/interfaces/python/opengm/learning/CMakeLists.txt
index b0fc171..05bc47c 100644
--- a/src/interfaces/python/opengm/learning/CMakeLists.txt
+++ b/src/interfaces/python/opengm/learning/CMakeLists.txt
@@ -28,6 +28,7 @@ set(PY_OPENGM_CORE_SOURCES
             pyStructMaxMarginLearner.cxx
             pySubgradientSSVM.cxx
             pyStructPerceptron.cxx
+            pyRws.cxx
             )
 
 if(APPLE)
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index ca3aca1..1844a4f 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -116,7 +116,8 @@ def extend_learn():
             self.learnReducedInfSelfFusion(infCls=infCls, parameter=parameter,**kwargs)
 
     # all learner classes
-    learnerClss = [GridSearch_FlexibleLoss, StructPerceptron_FlexibleLoss,  SubgradientSSVM_FlexibleLoss] 
+    learnerClss = [GridSearch_FlexibleLoss, StructPerceptron_FlexibleLoss,  
+                  SubgradientSSVM_FlexibleLoss, Rws_FlexibleLoss] 
     if opengmConfig.withCplex or opengmConfig.withGurobi :
         learnerClss.append(StructMaxMargin_Bundle_FlexibleLoss)
 
@@ -204,6 +205,26 @@ def structPerceptron(dataset, learningMode='online',eps=1e-5, maxIterations=1000
     return learner
 
 
+def rws(dataset, eps=1e-5, maxIterations=10000, stopLoss=0.0, learningRate=1.0, C=100.0, sigma=1.0, p=10):
+
+    assert dataset.__class__.lossType == 'flexible'
+    learnerCls = Rws_FlexibleLoss
+    learnerParamCls = Rws_FlexibleLossParameter
+
+
+    param = learnerParamCls()
+    param.eps = float(eps)
+    param.maxIterations = int(maxIterations)
+    param.stopLoss = float(stopLoss)
+    param.learningRate = float(learningRate)
+    param.C = float(C)
+    param.p = int(p)
+    param.sigma = float(sigma)
+    learner = learnerCls(dataset, param)
+    return learner
+
+
+
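+# A minimal usage sketch (assuming a dataset with lossType 'flexible',
+# e.g. built via secondOrderImageDataset):
+#
+#     learner = rws(dataset, learningRate=1.0, C=0.05,
+#                   maxIterations=5000, p=20, sigma=2.0)
+#     learner.learn(infCls=opengm.inference.TrwsExternal,
+#                   parameter=opengm.InfParam())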
 def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000, stopLoss=0.0, learningRate=1.0, C=100.0, averaging=-1, nConf=0):
 
     assert dataset.__class__.lossType == 'flexible'
diff --git a/src/interfaces/python/opengm/learning/helper.hxx b/src/interfaces/python/opengm/learning/helper.hxx
index 77abf88..e95034a 100644
--- a/src/interfaces/python/opengm/learning/helper.hxx
+++ b/src/interfaces/python/opengm/learning/helper.hxx
@@ -207,11 +207,11 @@ public:
         #endif
 
         c
-            .def("_learn",&pyLearn_Inf<IcmInf>)
-            .def("_learn",&pyLearn_Inf<LazyFlipperInf>)
-            .def("_learn",&pyLearn_Inf<BpInf>)
+            //.def("_learn",&pyLearn_Inf<IcmInf>)
+            //.def("_learn",&pyLearn_Inf<LazyFlipperInf>)
+            //.def("_learn",&pyLearn_Inf<BpInf>)
             #ifdef WITH_CPLEX
-            .def("_learn",&pyLearn_Inf<Cplex>) 
+            //.def("_learn",&pyLearn_Inf<Cplex>) 
             .def("_learn",&pyLearn_Inf<Multicut>)
             #endif
             #ifdef WITH_QPBO
@@ -221,6 +221,7 @@ public:
             .def("_learn",&pyLearn_Inf<TrwsExternal>)
             #endif
 
+            #if 0
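+            // reduced-inference learn bindings are compiled out here;
+            // the matching #endif is added at the end of this block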
             // REDUCED INFERENCE
             #ifdef WITH_QPBO
                 .def("_learnReducedInf",&pyLearn_ReducedInf<LazyFlipperInf>)
@@ -241,6 +242,7 @@ public:
             #if defined(WITH_TRWS) && defined(WITH_QPBO)
             .def("_learnReducedInfSelfFusion",&pyLearn_ReducedInfSelfFusion<TrwsExternal>)
             #endif
+            #endif
         ;
     }
 };
diff --git a/src/interfaces/python/opengm/learning/learning.cxx b/src/interfaces/python/opengm/learning/learning.cxx
index 2026108..a45d3a9 100644
--- a/src/interfaces/python/opengm/learning/learning.cxx
+++ b/src/interfaces/python/opengm/learning/learning.cxx
@@ -46,6 +46,9 @@ namespace opengm{
     template<class DATASET>
     void export_subgradient_ssvm_learner(const std::string & clsName);
 
+    template<class DATASET>
+    void export_rws_learner(const std::string & clsName);
+
     template<class GM_ADDER,class GM_MULT>  
     void export_lfunction_generator();
 
@@ -80,7 +83,7 @@ BOOST_PYTHON_MODULE_INIT(_learning) {
     opengm::export_struct_perceptron_learner<op::GmAdderFlexibleLossDataset>("StructPerceptron_FlexibleLoss");
     opengm::export_subgradient_ssvm_learner<op::GmAdderFlexibleLossDataset>("SubgradientSSVM_FlexibleLoss");
     opengm::export_max_likelihood_learner<op::GmAdderFlexibleLossDataset>("MaxLikelihood_FlexibleLoss");
-
+    opengm::export_rws_learner<op::GmAdderFlexibleLossDataset>("Rws_FlexibleLoss");
     
     #if defined(WITH_CPLEX) || defined(WITH_GUROBI)
         opengm::export_struct_max_margin_bundle_learner< op::GmAdderFlexibleLossDataset, ol::BundleOptimizer<op::GmValueType> >("StructMaxMargin_Bundle_FlexibleLoss");
diff --git a/src/interfaces/python/opengm/learning/pyRws.cxx b/src/interfaces/python/opengm/learning/pyRws.cxx
new file mode 100644
index 0000000..43bdaf9
--- /dev/null
+++ b/src/interfaces/python/opengm/learning/pyRws.cxx
@@ -0,0 +1,72 @@
+#include <boost/python.hpp>
+#include <boost/python/module.hpp>
+#include <opengm/python/opengmpython.hxx>
+#include <opengm/python/converter.hxx>
+#include <opengm/python/numpyview.hxx>
+#include <opengm/learning/rws.hxx>
+
+#define DefaultErrorFn DefaultErrorFn_TrwsExternalRws
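+// give helper.hxx's DefaultErrorFn a unique name in this translation unit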
+#include "helper.hxx"
+
+namespace bp = boost::python;
+namespace op = opengm::python;
+namespace ol = opengm::learning;
+
+namespace opengm{
+
+
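+    // factory helpers for bp::make_constructor: Boost.Python expects a
+    // function returning a pointer to the newly constructed object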
+    template<class PARAM>
+    PARAM * pyRwsParamConstructor(){
+        return new PARAM();
+    }
+
+    template<class L>
+    L * pyRwsConstructor(
+        typename L::DatasetType & dataset,
+        const typename L::Parameter & param
+    ){
+        return new L(dataset, param);
+    }
+
+    template<class DATASET>
+    void export_rws_learner(const std::string & clsName){
+        typedef learning::Rws<DATASET> PyLearner;
+        typedef typename PyLearner::Parameter PyLearnerParam;
+
+        const std::string paramClsName = clsName + std::string("Parameter");
+
+
+        // learner param
+        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
+            .def("__init__", make_constructor(&pyRwsParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
+            .def_readwrite("eps",  &PyLearnerParam::eps_)
+            .def_readwrite("maxIterations", &PyLearnerParam::maxIterations_)
+            .def_readwrite("stopLoss", &PyLearnerParam::stopLoss_)
+            .def_readwrite("learningRate", &PyLearnerParam::learningRate_)
+            .def_readwrite("C", &PyLearnerParam::C_)
+            .def_readwrite("p", &PyLearnerParam::p_)
+            .def_readwrite("sigma", &PyLearnerParam::sigma_)
+        ;
+
+
+        // learner
+        bp::class_<PyLearner>( clsName.c_str(), bp::no_init )
+        .def("__init__", make_constructor(&pyRwsConstructor<PyLearner> ,boost::python::default_call_policies()))
+        .def(LearnerInferenceSuite<PyLearner>())
+        ;
+    }
+
+
+    template void 
+    export_rws_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
+}
+
+

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


