[opengm] 183/386: implemented getLoss for the dataset and almost brought it to Python; still to be fixed: pyDataset

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:36 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 79f812d575a7b4ccd81eaf6aa36f608674f1cc04
Author: mschiegg <martin.schiegg at iwr.uni-heidelberg.de>
Date:   Fri Dec 19 18:08:20 2014 +0100

    implemented getLoss for the dataset and almost brought it to Python; still to be fixed: pyDataset
---
 include/opengm/learning/dataset/dataset.hxx        | 32 +++++++++-
 src/interfaces/python/opengm/learning/__init__.py  | 21 ++++++-
 src/interfaces/python/opengm/learning/helper.hxx   | 69 ++++++++++++++++++++++
 .../python/opengm/learning/pyDataset.cxx           |  5 +-
 4 files changed, 124 insertions(+), 3 deletions(-)

diff --git a/include/opengm/learning/dataset/dataset.hxx b/include/opengm/learning/dataset/dataset.hxx
index f9c1a6e..3168c15 100644
--- a/include/opengm/learning/dataset/dataset.hxx
+++ b/include/opengm/learning/dataset/dataset.hxx
@@ -32,6 +32,12 @@ namespace opengm {
          Weights&                      getWeights()                            { return weights_; } 
          size_t                        getNumberOfWeights() const              { return weights_.numberOfWeights(); }
          size_t                        getNumberOfModels() const               { return gms_.size(); } 
+
+         template<class INF>
+         ValueType                     getTotalLoss(const typename INF::Parameter& para) const;
+
+         template<class INF>
+         ValueType                     getLoss(const typename INF::Parameter& para, const size_t i) const;
          
          Dataset(size_t numInstances=0);
         //void loadAll(std::string path,std::string prefix); 
@@ -52,7 +58,6 @@ namespace opengm {
       };
       
 
-
       template<class GM, class LOSS>
       Dataset<GM, LOSS>::Dataset(size_t numInstances)
           : count_(std::vector<size_t>(numInstances)),
@@ -65,6 +70,31 @@ namespace opengm {
       {
       }
 
+      template<class GM, class LOSS>
+      template<class INF>
+      typename GM::ValueType Dataset<GM, LOSS>::getTotalLoss(const typename INF::Parameter& para) const {
+          ValueType sum=0;
+          for(size_t i=0; i<this->getNumberOfModels(); ++i) {
+             sum += this->getLoss<INF>(para, i);
+          }
+          return sum;
+      }
+
+      template<class GM, class LOSS>
+      template<class INF>
+      typename GM::ValueType Dataset<GM, LOSS>::getLoss(const typename INF::Parameter& para, const size_t i) const {
+          LOSS lossFunction(lossParams_[i]);
+          const GM& gm = this->getModel(i);
+          const std::vector<typename INF::LabelType>& gt =  this->getGT(i);
+
+          std::vector<typename INF::LabelType> conf;
+          INF inf(gm,para);
+          inf.infer();
+          inf.arg(conf);
+
+          return lossFunction.loss(gm, conf.begin(), conf.end(), gt.begin(), gt.end());
+
+      }
 
      template<class GM, class LOSS>
      void Dataset<GM, LOSS>::buildModelWithLoss(size_t i){
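
getLoss(para, i) runs the chosen inference engine INF on model i, takes its arg() labeling and scores it against the stored ground truth with the dataset's LOSS type; getTotalLoss(para) simply sums that value over all models. For illustration, a minimal Python sketch of the same computation for a single hand-built model, using the stock opengm Python API and a manual Hamming count (the toy model, its values and the ground-truth labels are made up for this example):

    import numpy
    import opengm

    # toy chain model with three binary variables (all values are arbitrary)
    gm = opengm.gm([2, 2, 2], operator='adder')
    unaries = numpy.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.4]])
    for v in range(3):
        gm.addFactor(gm.addFunction(unaries[v]), [v])
    # Potts pairwise term: 0.0 if labels agree, 0.3 otherwise
    pottsId = gm.addFunction(opengm.PottsFunction([2, 2], 0.0, 0.3))
    gm.addFactor(pottsId, [0, 1])
    gm.addFactor(pottsId, [1, 2])

    # what getLoss does for one model: infer, then compare against the ground truth
    inf = opengm.inference.Icm(gm)
    inf.infer()
    conf = inf.arg()
    gt = numpy.array([0, 0, 0])          # made-up ground-truth labeling
    hammingLoss = numpy.sum(conf != gt)  # Hamming count: number of mislabeled variables
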
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index 7a7620e..0e35706 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -17,6 +17,20 @@ def _extendedLearn(self, infCls, parameter = None):
     cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
     self._learn(cppParam)
 
+def _extendedGetLoss(self, model_idx, infCls, parameter = None):
+    if parameter is None:
+        import opengm
+        parameter = opengm.InfParam()
+    cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
+    return self._getLoss(cppParam, model_idx)
+
+def _extendedGetTotalLoss(self, infCls, parameter = None):
+    if parameter is None:
+        import opengm
+        parameter = opengm.InfParam()
+    cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
+    return self._getTotalLoss(cppParam)
+
 GridSearch_HammingLoss.learn  =_extendedLearn
 GridSearch_GeneralizedHammingLoss.learn  =_extendedLearn
 
@@ -26,7 +40,12 @@ MaxLikelihood_GeneralizedHammingLoss.learn  =_extendedLearn
 if opengmConfig.withCplex or opengmConfig.withGurobi :
     StructMaxMargin_Bundle_HammingLoss.learn = _extendedLearn
     StructMaxMargin_Bundle_GeneralizedHammingLoss.learn = _extendedLearn
-        
+
+DatasetWithHammingLoss.getLoss = _extendedGetLoss
+DatasetWithHammingLoss.getTotalLoss = _extendedGetTotalLoss
+DatasetWithGeneralizedHammingLoss.getLoss = _extendedGetLoss
+DatasetWithGeneralizedHammingLoss.getTotalLoss = _extendedGetTotalLoss
+
 def createDataset(numWeights, loss='hamming', numInstances=0):
     weightVals = numpy.ones(numWeights)
     weights = Weights(weightVals)
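
With these wrappers attached to the dataset classes, the intended call pattern from Python is shown below. Note that the underlying _getLoss/_getTotalLoss bindings are only exported once the DatasetInferenceSuite visitor is enabled in pyDataset.cxx (see the FIXME further down), so this is a sketch of the intended usage rather than something that works at this commit; the weight count and model index are arbitrary:

    import opengm
    from opengm import learning

    ds = learning.createDataset(numWeights=10, loss='hamming')
    # ... populate ds with models and ground-truth labelings ...

    # loss of a single model, using ICM as the inference engine
    lossOfModel0 = ds.getLoss(0, opengm.inference.Icm, opengm.InfParam())

    # loss summed over all models of the dataset
    totalLoss = ds.getTotalLoss(opengm.inference.Icm, opengm.InfParam())
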
diff --git a/src/interfaces/python/opengm/learning/helper.hxx b/src/interfaces/python/opengm/learning/helper.hxx
index eeabf68..72bd562 100644
--- a/src/interfaces/python/opengm/learning/helper.hxx
+++ b/src/interfaces/python/opengm/learning/helper.hxx
@@ -82,6 +82,75 @@ public:
    }
 };
 
+
+
+template<class DS>
+class DatasetInferenceSuite: public boost::python::def_visitor<DatasetInferenceSuite<DS> >{
+public:
+   friend class boost::python::def_visitor_access;
+
+   DatasetInferenceSuite(){
+
+   }
+
+   template<class INF>
+   static typename DS::ValueType pyGetLossWithInf(DS & ds, const typename INF::Parameter & param, const size_t i)
+   {
+       return ds. template getLoss<INF>(param, i);
+   }
+
+   template<class INF>
+   static typename DS::ValueType pyGetTotalLossWithInf(DS & ds, const typename INF::Parameter & param)
+   {
+       return ds. template getTotalLoss<INF>(param);
+   }
+
+   template <class classT>
+   void visit(classT& c) const{
+       // SOME INFERENCE METHODS
+       typedef typename DS::GMType GMType;
+       typedef opengm::Minimizer ACC;
+
+       typedef opengm::ICM<GMType, ACC> IcmInf;
+       typedef opengm::LazyFlipper<GMType, ACC> LazyFlipperInf;
+       typedef opengm::BeliefPropagationUpdateRules<GMType, ACC> UpdateRulesType;
+       typedef opengm::MessagePassing<GMType, ACC, UpdateRulesType, opengm::MaxDistance> BpInf;
+
+#ifdef WITH_CPLEX
+       typedef opengm::LPCplex<GMType, ACC> Cplex;
+#endif
+#ifdef WITH_QPBO
+       typedef opengm::external::QPBO<GMType>  QpboExternal;
+#endif
+#ifdef WITH_TRWS
+       typedef opengm::external::TRWS<GMType>  TrwsExternal;
+#endif
+
+      c
+          .def("_getLoss",&pyGetLossWithInf<IcmInf>)
+          .def("_getTotalLoss",&pyGetTotalLossWithInf<IcmInf>)
+          .def("_getLoss",&pyGetLossWithInf<LazyFlipperInf>)
+          .def("_getTotalLoss",&pyGetTotalLossWithInf<LazyFlipperInf>)
+          .def("_getLoss",&pyGetLossWithInf<BpInf>)
+          .def("_getTotalLoss",&pyGetTotalLossWithInf<BpInf>)
+#ifdef WITH_CPLEX
+          .def("_getLoss",&pyGetLossWithInf<Cplex>)
+          .def("_getTotalLoss",&pyGetTotalLossWithInf<Cplex>)
+#endif
+#ifdef WITH_QPBO
+          .def("_getLoss",&pyGetLossWithInf<QpboExternal>)
+          .def("_getTotalLoss",&pyGetTotalLossWithInf<QpboExternal>)
+#endif
+#ifdef WITH_TRWS
+          .def("_getLoss",&pyGetLossWithInf<TrwsExternal>)
+          .def("_getTotalLoss",&pyGetTotalLossWithInf<TrwsExternal>)
+#endif
+      ;
+   }
+};
+
+
+
 } // namespace opengm
 
 #endif // HELPER_HXX
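
The visitor exports one _getLoss/_getTotalLoss overload per inference engine, and Boost.Python picks the overload from the type of the C++ parameter object passed in; that parameter object is what infCls.get_cpp_parameter(...) produces on the Python side, as used by the wrappers in __init__.py above. A sketch of the equivalent low-level call, assuming the visitor is enabled and the dataset has been populated:

    import opengm
    from opengm import learning

    ds = learning.createDataset(numWeights=10, loss='hamming')
    # ... populate ds with models and ground-truth labelings ...

    # the type of cppParam selects the matching overload (here: the ICM one)
    cppParam = opengm.inference.Icm.get_cpp_parameter(
        operator='adder', accumulator='minimizer', parameter=opengm.InfParam())
    lossOfModel0 = ds._getLoss(cppParam, 0)
    totalLoss = ds._getTotalLoss(cppParam)
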
diff --git a/src/interfaces/python/opengm/learning/pyDataset.cxx b/src/interfaces/python/opengm/learning/pyDataset.cxx
index 33cff14..b23bb71 100644
--- a/src/interfaces/python/opengm/learning/pyDataset.cxx
+++ b/src/interfaces/python/opengm/learning/pyDataset.cxx
@@ -11,6 +11,7 @@
 #include <opengm/python/numpyview.hxx>
 #include <opengm/python/opengmpython.hxx>
 #include <opengm/python/converter.hxx>
+#include "helper.hxx"
 
 using namespace boost::python;
 
@@ -70,7 +71,8 @@ template<class GM, class LOSS>
 void export_dataset(const std::string& className){
     typedef opengm::datasets::EditableDataset<GM,LOSS > PyDataset;
 
-   class_<PyDataset > (className.c_str(), init<size_t> ())
+   class_<PyDataset > (className.c_str(), boost::python::no_init)
+           .def(init<size_t>())
            .def("lockModel", &PyDataset::lockModel)
            .def("unlockModel", &PyDataset::unlockModel)
            .def("getModel", &PyDataset::getModel, return_internal_reference<>())
@@ -86,6 +88,7 @@ void export_dataset(const std::string& className){
            .def("setWeights", &PyDataset::setWeights)
            .def("save", &pySaveDataset<GM, LOSS>)
            .def("load", &pyLoadDataset<GM, LOSS>)
+//           .def(DatasetInferenceSuite<PyDataset>()) //FIXME
    ;
 
 }
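
Exporting the class with no_init and adding the size_t constructor back via .def(init<size_t>()) keeps construction from Python unchanged while making it explicit that no default constructor is exposed. Construction therefore still looks like this (the loss variant and instance count are arbitrary):

    from opengm import learning

    # editable dataset sized for 4 (still empty) instances, Hamming-loss variant
    ds = learning.DatasetWithHammingLoss(4)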

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


