[opengm] 118/386: python loss param object

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:35:23 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 2d26d9cbaad4061a307cf49afdedfc618000deaf
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Thu Dec 18 14:46:55 2014 +0100

    python loss param object
---
 .../learning/loss/generalized-hammingloss.hxx      | 11 ++++
 include/opengm/learning/loss/hammingloss.hxx       | 10 +++
 include/opengm/learning/loss/noloss.hxx            | 76 +++++++++++++---------
 src/interfaces/python/opengm/learning/pyLoss.cxx   | 56 +++++++++++-----
 4 files changed, 105 insertions(+), 48 deletions(-)

diff --git a/include/opengm/learning/loss/generalized-hammingloss.hxx b/include/opengm/learning/loss/generalized-hammingloss.hxx
index b17c383..bb78154 100644
--- a/include/opengm/learning/loss/generalized-hammingloss.hxx
+++ b/include/opengm/learning/loss/generalized-hammingloss.hxx
@@ -18,8 +18,19 @@ public:
     public:
         std::vector<double> nodeLossMultiplier_;
         std::vector<double> labelLossMultiplier_;
+
+        bool operator==(const Parameter & other) const{
+                return nodeLossMultiplier_ == other.nodeLossMultiplier_ && labelLossMultiplier_ == other.labelLossMultiplier_;
+        }
+        bool operator<(const Parameter & other) const{
+                return nodeLossMultiplier_ < other.nodeLossMultiplier_;
+        }
+        bool operator>(const Parameter & other) const{
+                return nodeLossMultiplier_ > other.nodeLossMultiplier_;
+        }
     };
 
+
 public:
     GeneralizedHammingLoss(const Parameter& param = Parameter()) : param_(param){}
 
diff --git a/include/opengm/learning/loss/hammingloss.hxx b/include/opengm/learning/loss/hammingloss.hxx
index fcab161..8500f76 100644
--- a/include/opengm/learning/loss/hammingloss.hxx
+++ b/include/opengm/learning/loss/hammingloss.hxx
@@ -8,6 +8,16 @@ namespace opengm {
       class HammingLoss{
       public:
           class Parameter{
+            public:
+            bool operator==(const Parameter & other) const{
+                return true;
+            }
+            bool operator<(const Parameter & other) const{
+                return false;
+            }
+            bool operator>(const Parameter & other) const{
+                return false;
+            }
           };
 
       public:
diff --git a/include/opengm/learning/loss/noloss.hxx b/include/opengm/learning/loss/noloss.hxx
index ae1ec73..067bd6e 100644
--- a/include/opengm/learning/loss/noloss.hxx
+++ b/include/opengm/learning/loss/noloss.hxx
@@ -4,36 +4,52 @@
 
 #include "opengm/functions/explicit_function.hxx"
 namespace opengm {
-   namespace learning {
-      class NoLoss{
-      public:
-          class Parameter{
-          };
-
-      public:
-         NoLoss(const Parameter& param = Parameter()) : param_(param){}
-
-         template<class IT1, class IT2>
-         double loss(IT1 labelBegin, IT1 labelEnd, IT2 GTBegin,IT2 GTEnd) const;
-  
-         template<class GM, class IT>
-         void addLoss(GM& gm, IT GTBegin) const;
-      private:
-         Parameter param_;
-      };
-
-      template<class IT1, class IT2>
-      double NoLoss::loss(IT1 labelBegin, const IT1 labelEnd, IT2 GTBegin, const IT2 GTEnd) const
-      {
-         double loss = 0.0;
-         return loss;
-      }
-
-      template<class GM, class IT>
-      void NoLoss::addLoss(GM& gm, IT gt) const
-      {
-      }
-   }  
+namespace learning {
+
+    class NoLoss{
+    public:
+        class Parameter{
+        public:
+            bool operator==(const Parameter & other) const{
+                return true;
+            }
+            bool operator<(const Parameter & other) const{
+                return false;
+            }
+            bool operator>(const Parameter & other) const{
+                return false;
+            }
+        };
+
+    public:
+        NoLoss(const Parameter& param = Parameter()) 
+        : param_(param){
+
+        }
+
+        template<class IT1, class IT2>
+        double loss(IT1 labelBegin, IT1 labelEnd, IT2 GTBegin,IT2 GTEnd) const;
+
+        template<class GM, class IT>
+        void addLoss(GM& gm, IT GTBegin) const;
+    private:
+        Parameter param_;
+
+    };
+
+    template<class IT1, class IT2>
+    double NoLoss::loss(IT1 labelBegin, const IT1 labelEnd, IT2 GTBegin, const IT2 GTEnd) const
+    {
+        double loss = 0.0;
+        return loss;
+    }
+
+    template<class GM, class IT>
+    void NoLoss::addLoss(GM& gm, IT gt) const
+    {
+    }
+
+}  
 } // namespace opengm
 
 #endif 
diff --git a/src/interfaces/python/opengm/learning/pyLoss.cxx b/src/interfaces/python/opengm/learning/pyLoss.cxx
index 85c256e..abfde86 100644
--- a/src/interfaces/python/opengm/learning/pyLoss.cxx
+++ b/src/interfaces/python/opengm/learning/pyLoss.cxx
@@ -30,30 +30,50 @@ void export_loss(){
    typedef typename std::vector<typename GM::LabelType>::const_iterator Literator;
    typedef typename std::vector<typename GM::LabelType>::const_iterator Niterator;
    typedef opengm::learning::HammingLoss PyHammingLoss;
-   typedef opengm::learning::NoLoss PyNoLoss;
    typedef opengm::learning::GeneralizedHammingLoss PyGeneralizedHammingLoss;
+   typedef opengm::learning::NoLoss PyNoLoss;
+
+
+
+    typedef opengm::learning::GeneralizedHammingLoss::Parameter PyGeneralizedHammingLossParameter;
+
+    class_<PyHammingLoss >("HammingLoss")
+        .def("loss", &PyHammingLoss::loss<Literator,Literator>)
+        .def("addLoss", &PyHammingLoss::addLoss<GM, Literator>)
+    ;
+
+    class_<PyNoLoss >("NoLoss")
+        .def("loss", &PyNoLoss::loss<Literator,Literator>)
+        .def("addLoss", &PyNoLoss::addLoss<GM, Literator>)
+    ;
+
+    class_<PyGeneralizedHammingLoss >("GeneralizedHammingLoss", init<PyGeneralizedHammingLossParameter>())
+        .def("loss", &PyGeneralizedHammingLoss::loss<Literator,Literator>)
+        .def("addLoss", &PyGeneralizedHammingLoss::addLoss<GM, Literator>)
+    ;
+
 
-   typedef opengm::learning::GeneralizedHammingLoss::Parameter PyGeneralizedHammingLossParameter;
+    class_<PyNoLoss::Parameter>("NoLossParameter")
+    ;
 
-   class_<PyHammingLoss >("HammingLoss")
-           .def("loss", &PyHammingLoss::loss<Literator,Literator>)
-           .def("addLoss", &PyHammingLoss::addLoss<GM, Literator>)
-   ;
+    class_<PyHammingLoss::Parameter>("HammingLossParameter")
+    ;
 
-   class_<PyNoLoss >("NoLoss")
-           .def("loss", &PyNoLoss::loss<Literator,Literator>)
-           .def("addLoss", &PyNoLoss::addLoss<GM, Literator>)
-   ;
+    class_<PyGeneralizedHammingLossParameter>("GeneralizedHammingLossParameter")
+        .def("setNodeLossMultiplier", &pySetNodeLossMultiplier)
+        .def("setLabelLossMultiplier", &pySetLabelLossMultiplier)
+    ;
 
-   class_<PyGeneralizedHammingLoss >("GeneralizedHammingLoss", init<PyGeneralizedHammingLossParameter>())
-           .def("loss", &PyGeneralizedHammingLoss::loss<Literator,Literator>)
-           .def("addLoss", &PyGeneralizedHammingLoss::addLoss<GM, Literator>)
-   ;
+    class_<std::vector< PyNoLoss::Parameter > >("NoLossParameterVector")
+        .def(vector_indexing_suite<std::vector< PyNoLoss::Parameter> >())
+    ;
+    class_<std::vector< PyHammingLoss::Parameter > >("HammingLossParameterVector")
+        .def(vector_indexing_suite<std::vector< PyHammingLoss::Parameter> >())
+    ;
+    class_<std::vector< PyGeneralizedHammingLoss::Parameter > >("GeneralizedHammingLossParameterVector")
+        .def(vector_indexing_suite<std::vector< PyGeneralizedHammingLoss::Parameter> >())
+    ;
 
-   class_<PyGeneralizedHammingLossParameter>("GeneralizedHammingLossParameter")
-           .def("setNodeLossMultiplier", &pySetNodeLossMultiplier)
-           .def("setLabelLossMultiplier", &pySetLabelLossMultiplier)
-   ;
 }
 
 

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git



More information about the debian-science-commits mailing list