[opengm] 249/386: temp. disabled learn method from ml-learning in python binding

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:03 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 2a4409a7ea9c411d145d16147bd55df99c2f7ca7
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Fri Jan 16 10:58:12 2015 +0100

    temp. disabled learn method from ml-learning in python binding
---
 fubar/brown_horse_sp.py                            | 123 ++++++++++++++-------
 fubar/simple_sp.py                                 |  23 ++--
 include/opengm/graphicalmodel/weights.hxx          |   8 +-
 include/opengm/learning/subgradient_ssvm.hxx       |  40 ++++---
 src/interfaces/python/opengm/learning/__init__.py  |  12 +-
 .../opengm/learning/pyMaxLikelihoodLearner.cxx     |  15 ++-
 6 files changed, 134 insertions(+), 87 deletions(-)

diff --git a/fubar/brown_horse_sp.py b/fubar/brown_horse_sp.py
index eb6f90d..275be00 100644
--- a/fubar/brown_horse_sp.py
+++ b/fubar/brown_horse_sp.py
@@ -10,22 +10,7 @@ from opengm.learning import secondOrderImageDataset, getPbar,superpixelDataset
 
 
 
-def posiFeatures(img):
-    shape = img.shape[0:2]
-    x = numpy.linspace(0, 1, shape[0])
-    y = numpy.linspace(0, 1, shape[1])
-    xv, yv = numpy.meshgrid(y, x)
-    xv -=0.5
-    yv -=0.5
 
-    rad = numpy.sqrt(xv**2 + yv**2)[:,:,None]
-    erad = numpy.exp(1.0 - rad)
-    xva = (xv**2)[:,:,None]
-    yva = (yv**2)[:,:,None]
-
-    res = numpy.concatenate([erad, rad,xva,yva,xv[:,:,None],yv[:,:,None]],axis=2)
-    assert res.shape[0:2] == img.shape[0:2]
-    return res
 
 #i = numpy.ones([7, 5])
 #
@@ -33,7 +18,7 @@ def posiFeatures(img):
 #
 # where is the dataset stored
 dsetRoot = '/home/tbeier/datasets/weizmann_horse_db/'
-imgPath = dsetRoot + 'brown_horse/'
+imgPath = dsetRoot + 'rgb/'
 gtBasePath = dsetRoot + 'figure_ground/'
 
 imgFiles = glob.glob(imgPath+'*.jpg')
@@ -47,6 +32,9 @@ gts = []
 pbar = getPbar(len(imgFiles), 'Load Image')
 pbar.start()
 for i,path in enumerate(imgFiles):
+
+    if i>50 :
+        break
     gtPath =  gtBasePath + os.path.basename(path)
     rgbImg  = vigra.impex.readImage(path)
     gtImg  = vigra.impex.readImage(gtPath).astype('uint32')[::takeNth,::takeNth]
@@ -82,43 +70,96 @@ for i,path in enumerate(imgFiles):
 
 pbar.finish()
 
+def posiFeatures(img):
+    shape = img.shape[0:2]
+    x = numpy.linspace(0, 1, shape[0])
+    y = numpy.linspace(0, 1, shape[1])
+    xv, yv = numpy.meshgrid(y, x)
+    xv -=0.5
+    yv -=0.5
+
+    rad = numpy.sqrt(xv**2 + yv**2)[:,:,None]
+    erad = numpy.exp(1.0 - rad)
+    xva = (xv**2)[:,:,None]
+    yva = (yv**2)[:,:,None]
+
+    res = numpy.concatenate([erad, rad,xva,yva,xv[:,:,None],yv[:,:,None]],axis=2)
+    assert res.shape[0:2] == img.shape[0:2]
+    return res
+
 def getSelf(img):
-    return img
+    f=img.copy()
+    f-=f.min()
+    f/=f.max()
+    return f
 
 
 def labHessianOfGaussian(img, sigma):
     l = vigra.colors.transform_RGB2Lab(img)[:,:,0]
     l = vigra.taggedView(l,'xy')
-    return vigra.filters.hessianOfGaussianEigenvalues(l, sigma)
+    f =  vigra.filters.hessianOfGaussianEigenvalues(l, sigma)
+    f-=f.min()
+    f/=f.max()
+    return f
 
 def labStructTensorEv(img, sigma):
     l = vigra.colors.transform_RGB2Lab(img)[:,:,0]
     l = vigra.taggedView(l,'xy')
-    return vigra.filters.structureTensorEigenvalues(l, sigma, 2*sigma)
+    f = vigra.filters.structureTensorEigenvalues(l, sigma, 2*sigma)
+    f-=f.min()
+    f/=f.max()
+    return f
+
+def rgbHist(img):
+    minVals=(0.0,0.0,0.0)
+    maxVals=(255.0, 255.0, 255.0)
+    img = vigra.taggedView(img,'xyc')
+    hist = vigra.histogram.gaussianHistogram(img,minVals,maxVals,bins=30,sigma=3.0, sigmaBin=1.0)
+    f = vigra.taggedView(hist,'xyc')
+    f-=f.min()
+    f/=f.max()
+    return f
+
+
+def labHist(img):
+    minVals=(0.0,-86.1814   ,-107.862)
+    maxVals=(100.0, 98.2353, 94.48)
+    imgl= vigra.colors.transform_RGB2Lab(img)
+    hist = vigra.histogram.gaussianHistogram(imgl,minVals,maxVals,bins=30,sigma=3.0, sigmaBin=1.0)
+    f = vigra.taggedView(hist,'xyc')
+    f-=f.min()
+    f/=f.max()
+    return f
+
+def gmag(img, sigma):
+    f =  vigra.filters.gaussianGradientMagnitude(img, sigma)
+    f-=f.min()
+    f/=f.max()
+    return f
 
 fUnary = [
     posiFeatures,
+    labHist,
+    rgbHist,
     getSelf,
-    vigra.colors.transform_RGB2XYZ,
-    vigra.colors.transform_RGB2Lab,
-    vigra.colors.transform_RGB2Luv,
-    partial(labHessianOfGaussian, sigma=1.0),
-    partial(labHessianOfGaussian, sigma=2.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
-]
+    #vigra.colors.transform_RGB2XYZ,
+    #vigra.colors.transform_RGB2Lab,
+    #vigra.colors.transform_RGB2Luv,
+    #partial(labHessianOfGaussian, sigma=1.0),   
+    #partial(labHessianOfGaussian, sigma=2.0),
+    #partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
+    #partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
+]#
 
 fBinary = [
-    posiFeatures,
-    vigra.colors.transform_RGB2XYZ,
-    vigra.colors.transform_RGB2Lab,
-    vigra.colors.transform_RGB2Luv,
-    partial(labHessianOfGaussian, sigma=1.0),
-    partial(labHessianOfGaussian, sigma=2.0),
-    partial(labStructTensorEv, sigma=1.0),
-    partial(labStructTensorEv, sigma=2.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
-    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
+    #posiFeatures,
+    ##rgbHist,
+    #partial(labHessianOfGaussian, sigma=1.0),
+    #partial(labHessianOfGaussian, sigma=2.0),
+    #partial(labStructTensorEv, sigma=1.0),
+    #partial(labStructTensorEv, sigma=2.0),
+    partial(gmag, sigma=1.0),
+    partial(gmag, sigma=2.0),
 ]
 
 
@@ -130,7 +171,7 @@ dataset,test_set = superpixelDataset(imgs=imgs,sps=sps, gts=gts, numberOfLabels=
 
 
 
-learner =  learning.subgradientSSVM(dataset, learningRate=0.1, C=100, 
+learner =  learning.subgradientSSVM(dataset, learningRate=0.1, C=0.1, 
                                     learningMode='batch',maxIterations=1000, averaging=-1)
 
 
@@ -141,6 +182,11 @@ learner.learn(infCls=opengm.inference.QpboExternal,
               parameter=opengm.InfParam())
 
 
+w = dataset.getWeights()
+
+for wi in range(len(w)):
+    print "wi ",w[wi]
+
 
 # predict on test test
 for (rgbImg, sp, gm) in test_set :
@@ -149,7 +195,6 @@ for (rgbImg, sp, gm) in test_set :
     inf.infer()
     arg = inf.arg()+1
 
-    arg  = numpy.concatenate([[0],arg])
 
     gg  = vigra.graphs.gridGraph(rgbImg.shape[0:2])
     rag = vigra.graphs.regionAdjacencyGraph(gg,sp)
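
[Editor's note] The feature functions added to brown_horse_sp.py above all share one pattern: compute a filter response, then min-max scale it to [0, 1] in place. A minimal sketch of that pattern as a reusable wrapper (the `normalized` helper is hypothetical; the commit inlines the three normalization lines into each function):

    import numpy

    def normalized(featureFunc):
        # Wrap a feature function so its response is min-max scaled to
        # [0, 1], mirroring the "f -= f.min(); f /= f.max()" idiom above.
        def wrapped(img, **kwargs):
            f = numpy.asarray(featureFunc(img, **kwargs), dtype=float).copy()
            f -= f.min()
            f /= f.max()  # assumes a non-constant response (max > 0 after the shift)
            return f
        return wrapped

    # e.g. the commit's gmag(img, sigma) behaves like
    # normalized(vigra.filters.gaussianGradientMagnitude)(img, sigma=sigma)
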
diff --git a/fubar/simple_sp.py b/fubar/simple_sp.py
index dfecc33..af7cad8 100644
--- a/fubar/simple_sp.py
+++ b/fubar/simple_sp.py
@@ -13,7 +13,7 @@ from opengm.learning import secondOrderImageDataset, getPbar,superpixelDataset
 
 nImages = 20 
 shape = [100, 100]
-noise = 15.0
+noise = 8
 imgs = []
 gts = []
 sps = []
@@ -126,21 +126,18 @@ dataset,test_set = superpixelDataset(imgs=imgs,sps=sps, gts=gts, numberOfLabels=
                                           addConstFeature=True)
 
 
+ogm_ds.save("simple_dataset", 'simple_')
 
+if True:
 
+    learner =  learning.subgradientSSVM(dataset, learningRate=0.1, C=100, 
+                                        learningMode='batch',maxIterations=1000, averaging=-1)
+    learner.learn(infCls=opengm.inference.TrwsExternal, 
+                  parameter=opengm.InfParam())
 
-learner =  learning.subgradientSSVM(dataset, learningRate=0.1, C=100, 
-                                    learningMode='batch',maxIterations=1000, averaging=-1)
-
-
-#learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
-
-
-learner.learn(infCls=opengm.inference.TrwsExternal, 
-              parameter=opengm.InfParam())
-
-
-
+else:
+    learner = learning.maxLikelihoodLearner(dataset, temp=0.0000001)
+    learner.learn()
 # predict on test test
 for (rgbImg, sp, gm) in test_set :
     # infer for test image
diff --git a/include/opengm/graphicalmodel/weights.hxx b/include/opengm/graphicalmodel/weights.hxx
index 4438237..ee3ed2a 100644
--- a/include/opengm/graphicalmodel/weights.hxx
+++ b/include/opengm/graphicalmodel/weights.hxx
@@ -134,16 +134,16 @@ namespace learning{
             else if(regularizationType_ == L1Regularizer){
                 double val = 0.0;
                 for(size_t wi=0; wi<weights.size(); ++wi){
-                    val += lambda_*std::abs(weights[wi]);
+                    val += std::abs(weights[wi]);
                 }
-                return val;
+                return val*lambda_;
             }
             else { //if(regularizationType_ == L2Regularizer){
                 double val = 0.0;
                 for(size_t wi=0; wi<weights.size(); ++wi){
-                    val += lambda_*std::pow(weights[wi], 2);
+                    val += std::pow(weights[wi], 2);
                 }
-                return val;
+                return val*lambda_;
             }
         }
 
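
[Editor's note] The weights.hxx hunks are a pure factoring: lambda_ is loop-invariant, so the regularizer now accumulates the raw sum and scales once at the end, i.e. sum_i lambda*|w_i| = lambda * sum_i |w_i|. The value is unchanged; only one multiplication remains. A sketch of both regularizers, with numpy standing in for the C++ loop:

    import numpy

    def regularizerValue(weights, lam, kind="L2"):
        # Post-refactor form: accumulate first, scale by lambda once.
        w = numpy.asarray(weights, dtype=float)
        if kind == "L1":
            val = numpy.abs(w).sum()   # sum_i |w_i|
        else:                          # L2 (the fall-through branch)
            val = (w ** 2).sum()       # sum_i w_i^2
        return val * lam               # equal to summing lam*term inside the loop
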
diff --git a/include/opengm/learning/subgradient_ssvm.hxx b/include/opengm/learning/subgradient_ssvm.hxx
index 1210d16..67514f9 100644
--- a/include/opengm/learning/subgradient_ssvm.hxx
+++ b/include/opengm/learning/subgradient_ssvm.hxx
@@ -117,6 +117,7 @@ namespace opengm {
         Parameter para_;
         size_t iteration_;
         FeatureAcc featureAcc_;
+        WeightRegularizer<ValueType> wReg_;
         WeightAveraging<double> weightAveraging_;
     }; 
 
@@ -126,6 +127,7 @@ namespace opengm {
         para_(p),
         iteration_(0),
         featureAcc_(ds.getNumberOfWeights()),
+        wReg_(2, 1.0/p.C_),
         weightAveraging_(ds.getWeights(),p.averaging_)
     {
         featureAcc_.resetWeights();
@@ -200,26 +202,23 @@ namespace opengm {
                 featureAcc_.resetWeights();
                 double totalLoss = 0;
 
-#ifdef WITH_OPENMP
+                #ifdef WITH_OPENMP
                 omp_lock_t modelLockUnlock;
                 omp_init_lock(&modelLockUnlock);
-
                 omp_lock_t featureAccLock;
                 omp_init_lock(&featureAccLock);
-
                 #pragma omp parallel for reduction(+:totalLoss)  
-#endif
-                for(size_t gmi=0; gmi<nModels; ++gmi)
-                {
+                #endif
+                for(size_t gmi=0; gmi<nModels; ++gmi){
                     
                     // lock the model
-#ifdef WITH_OPENMP
+                    #ifdef WITH_OPENMP
                     omp_set_lock(&modelLockUnlock);
                     dataset_.lockModel(gmi);     
                     omp_unset_lock(&modelLockUnlock);
-#else
+                    #else
                     dataset_.lockModel(gmi);     
-#endif
+                    #endif
                         
                     
 
@@ -257,24 +256,24 @@ namespace opengm {
 
                             }
                         }
-#ifdef WITH_OPENMP
+                        #ifdef WITH_OPENMP
                         omp_set_lock(&featureAccLock);
                         featureAcc_.accumulateFromOther(featureAcc);
                         omp_unset_lock(&featureAccLock);
-#else
+                        #else
                         featureAcc_.accumulateFromOther(featureAcc);
-#endif
+                        #endif
                     }
                     else{
                         FeatureAcc featureAcc(nWegihts);
                         featureAcc.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
-#ifdef WITH_OPENMP
+                        #ifdef WITH_OPENMP
                         omp_set_lock(&featureAccLock);
                         featureAcc_.accumulateFromOther(featureAcc);
                         omp_unset_lock(&featureAccLock);
-#else
+                        #else
                         featureAcc_.accumulateFromOther(featureAcc);
-#endif
+                        #endif
                     }
 
 
@@ -285,14 +284,19 @@ namespace opengm {
                     //omp_unset_lock(&featureAccLock);
 
                     // unlock the model
-#ifdef WITH_OPENMP
+                    #ifdef WITH_OPENMP
                     omp_set_lock(&modelLockUnlock);
                     dataset_.unlockModel(gmi);     
                     omp_unset_lock(&modelLockUnlock);
-#else
+                    #else
                     dataset_.unlockModel(gmi);     
-#endif
+                    #endif
+
+
                 }
+
+                //const double wRegVal = wReg_(dataset_.getWeights());
+                //const double tObj = std::abs(totalLoss) + wRegVal;
                 if(iteration_%1==0){
                     std::cout << '\r'
                               << std::setw(6) << std::setfill(' ') << iteration_ << ':'
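
[Editor's note] The new wReg_ member is constructed as WeightRegularizer(2, 1.0/p.C_), and the commented-out lines show its intended use: a monitoring objective |totalLoss| + wReg(weights). Reading that constructor as an L2 penalty with lambda = 1/C (an assumption based on the weights.hxx regularizer above), the quantity would be:

    import numpy

    def regularizedObjective(totalLoss, weights, C):
        # Sketch of the commented-out tObj in subgradient_ssvm.hxx:
        # L2 penalty with lambda = 1/C added to the accumulated loss.
        w = numpy.asarray(weights, dtype=float)
        wRegVal = (1.0 / C) * (w ** 2).sum()
        return abs(totalLoss) + wRegVal
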
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index a85f772..7b0aa24 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -255,12 +255,14 @@ def structMaxMarginLearner(dataset, regularizerWeight=1.0, minEps=1e-5, nSteps=0
         raise RuntimeError("this learner needs widthCplex or withGurobi")
 
 
-def maxLikelihoodLearner(dataset):
-    #raise RuntimeError("not yet implemented / wrapped fully")
+def maxLikelihoodLearner(dataset, maxIterations=1000, reg=1.0, temp=1.0):
     learnerCls = MaxLikelihood_FlexibleLoss
     learnerParamCls = MaxLikelihood_FlexibleLossParameter
 
     param = learnerParamCls()
+    param.maxIterations = int(maxIterations)
+    param.reg = float(reg)
+    param.temperature = float(temp)
     learner = learnerCls(dataset, param)
         
     return learner
@@ -456,9 +458,9 @@ def secondOrderImageDataset(imgs, gts, numberOfLabels, fUnary, fBinary, addConst
 
         # add unaries
         lUnaries = lUnaryFunctions(weights =weights,numberOfLabels = numberOfLabels, 
-                                            features=unaryFeat, weightIds = uWeightIds,
-                                            featurePolicy= FeaturePolicy.sharedBetweenLabels,
-                                            makeFirstEntryConst=numberOfLabels==2, addConstFeature=addConstFeature)
+                                    features=unaryFeat, weightIds = uWeightIds,
+                                    featurePolicy= FeaturePolicy.sharedBetweenLabels,
+                                    makeFirstEntryConst=numberOfLabels==2, addConstFeature=addConstFeature)
         fids = gm.addFunctions(lUnaries)
         gm.addFactors(fids, numpy.arange(numVar))
 
diff --git a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
index f1998b7..6490c56 100644
--- a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
+++ b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
@@ -19,12 +19,9 @@ namespace ol = opengm::learning;
 namespace opengm{
 
 
-    template<class PARAM>
-    PARAM * pyMaxLikelihoodParamConstructor(
-    ){
-        PARAM * p  = new PARAM();
-        return p;
-    }
+    template<class ML, class INF>
+    void learn()
+
 
     template<class DATASET>
     void export_max_likelihood_learner(const std::string & clsName){
@@ -35,11 +32,13 @@ namespace opengm{
         const std::string paramClsName = clsName + std::string("Parameter");
 
         bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            //.def("__init__", make_constructor(&pyMaxLikelihoodParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
+            .def_readwrite("maxIterations", &PyLearnerParam::maxNumSteps_)
+            .def_readwrite("reg", &PyLearnerParam::reg_)
+            .def_readwrite("temperature", &PyLearnerParam::temperature_)
         ;
 
         boost::python::class_<PyLearner>( clsName.c_str(), boost::python::init<DatasetType &, const PyLearnerParam &>() )
-            .def("learn",&PyLearner::learn)
+            //.def("learn",&PyLearner::learn)
         ;
     }
 

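[Editor's note] The binding change matches the commit title: the parameter fields become read/write from Python, while the `.def("learn", ...)` export is commented out, so the wrapped learner temporarily has no learn method. (The hunk also leaves a dangling `template<class ML, class INF> void learn()` declaration above `export_max_likelihood_learner`, which looks like an unfinished edit.) Assuming the module still builds, the observable effect from Python would be:

    from opengm import learning

    def tryLearn(dataset):
        learner = learning.maxLikelihoodLearner(dataset)  # construction still works
        try:
            learner.learn()   # export commented out in this commit
        except AttributeError:
            pass              # expected while the method is temporarily disabled
        return learner
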
-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git
