[caffe-contrib] 05/08: patch: update patchstack

Zhou Mo cdluminate-guest@moszumanska.debian.org
Mon May 1 01:32:57 UTC 2017


This is an automated email from the git hooks/post-receive script.

cdluminate-guest pushed a commit to branch master
in repository caffe-contrib.

commit 8e4708aa8c816d279bc51c44085dac6f44e52ba8
Author: Zhou Mo <cdluminate@gmail.com>
Date:   Mon May 1 01:25:53 2017 +0000

    patch: update patchstack
---
 debian/changelog                                   |   5 +
 debian/patches/cmake-using-gnuinstalldirs          |  49 ------
 .../fix-more-float-comparison-precision-issue      |  73 ---------
 debian/patches/post_rc5_upstream_updates.patch     | 174 ---------------------
 debian/patches/series                              |   3 -
 5 files changed, 5 insertions(+), 299 deletions(-)

diff --git a/debian/changelog b/debian/changelog
index c57e4cb..0f8f383 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,6 +1,11 @@
 caffe-contrib (1.0.0-1) UNRELEASED; urgency=medium
 
   * New upstream release.
+  * Patchset update:
+    - Remove post_rc5_upstream_updates.patch (included in upstream).
+    - Remove cmake-using-gnuinstalldirs (merged).
+    - Remove fix-more-float-comparison-precision-issue (merged).
+
 
 -- Zhou Mo <cdluminate@gmail.com>  Mon, 01 May 2017 01:22:35 +0000
 
diff --git a/debian/patches/cmake-using-gnuinstalldirs b/debian/patches/cmake-using-gnuinstalldirs
deleted file mode 100644
index b7837d1..0000000
--- a/debian/patches/cmake-using-gnuinstalldirs
+++ /dev/null
@@ -1,49 +0,0 @@
-using GNUInstallDirs cmake module and fix install paths for Debian
-
-Forwarded: yes
-https://github.com/BVLC/caffe/pull/4237
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -18,6 +18,7 @@
- list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/Modules)
- 
- include(ExternalProject)
-+include(GNUInstallDirs)
- 
- include(cmake/Utils.cmake)
- include(cmake/Targets.cmake)
---- a/src/caffe/CMakeLists.txt
-+++ b/src/caffe/CMakeLists.txt
-@@ -40,9 +40,9 @@
-  add_subdirectory(test)
- 
- # ---[ Install
--install(DIRECTORY ${Caffe_INCLUDE_DIR}/caffe DESTINATION include)
--install(FILES ${proto_hdrs} DESTINATION include/caffe/proto)
--install(TARGETS caffe proto EXPORT CaffeTargets DESTINATION lib)
-+install(DIRECTORY ${Caffe_INCLUDE_DIR}/caffe DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
-+install(FILES ${proto_hdrs} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/caffe/proto)
-+install(TARGETS caffe proto EXPORT CaffeTargets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- 
- file(WRITE ${PROJECT_BINARY_DIR}/__init__.py)
- list(APPEND proto_python ${PROJECT_BINARY_DIR}/__init__.py)
---- a/tools/CMakeLists.txt
-+++ b/tools/CMakeLists.txt
-@@ -25,5 +25,5 @@
-   endif()
- 
-   # Install
--  install(TARGETS ${name} DESTINATION bin)
-+  install(TARGETS ${name} DESTINATION ${CMAKE_INSTALL_BINDIR})
- endforeach(source)
---- a/examples/CMakeLists.txt
-+++ b/examples/CMakeLists.txt
-@@ -19,7 +19,7 @@
-   caffe_set_solution_folder(${name} examples)
- 
-   # install
--  install(TARGETS ${name} DESTINATION bin)
-+  install(TARGETS ${name} DESTINATION ${CMAKE_INSTALL_BINDIR})
- 
-   if(UNIX OR APPLE)
-     # Funny command to make tutorials work
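
For context: the patch above was forwarded upstream as BVLC/caffe#4237 and
merged before the 1.0.0 release. It replaces hard-coded install destinations
(bin, lib, include) with the variables defined by CMake's stock GNUInstallDirs
module. That matters on Debian because GNUInstallDirs resolves
CMAKE_INSTALL_LIBDIR against the platform's conventions -- with a /usr prefix
on a multiarch Debian system it typically becomes lib/<multiarch-triplet>,
e.g. lib/x86_64-linux-gnu -- so libraries land in the policy path without
per-distribution patching.
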
diff --git a/debian/patches/fix-more-float-comparison-precision-issue b/debian/patches/fix-more-float-comparison-precision-issue
deleted file mode 100644
index 2f8e248..0000000
--- a/debian/patches/fix-more-float-comparison-precision-issue
+++ /dev/null
@@ -1,73 +0,0 @@
-Forward: [yes] https://github.com/BVLC/caffe/pull/4566
-
-commit d607858b90b645d8177c3970d782f0ab5c529558
-Author: Zhou Mo <cdluminate@gmail.com>
-Date:   Tue Aug 9 15:13:47 2016 +0000
-
-    Fix more float comparison precision issue
-    
-    With reference to this commit:
-    f1a8470aa21e35a5b2bb83007f8fb7680a354815
-    
-    This fix changes some EXPECT_EQ into EXPECT_FLOAT_EQ .
-
---- a/src/caffe/test/test_convolution_layer.cpp
-+++ b/src/caffe/test/test_convolution_layer.cpp
-@@ -695,7 +695,7 @@
-   }
-   ASSERT_EQ(backward_result_nd.count(), backward_result_2d.count());
-   for (int i = 0; i < backward_result_2d.count(); ++i) {
--    EXPECT_EQ(backward_result_2d.cpu_diff()[i],
-+    EXPECT_FLOAT_EQ(backward_result_2d.cpu_diff()[i],
-               backward_result_nd.cpu_diff()[i]);
-   }
-   ASSERT_EQ(backward_weight_result_nd.count(),
---- a/src/caffe/test/test_gradient_based_solver.cpp
-+++ b/src/caffe/test/test_gradient_based_solver.cpp
-@@ -558,9 +558,9 @@
-     const vector<Blob<Dtype>*>& params = solver_->net()->learnable_params();
-     for (int i = 0; i < params.size(); ++i) {
-       for (int j = 0; j < params[i]->count(); ++j) {
--        EXPECT_EQ(param_copies[i]->cpu_data()[j], params[i]->cpu_data()[j])
-+        EXPECT_FLOAT_EQ(param_copies[i]->cpu_data()[j], params[i]->cpu_data()[j])
-             << "param " << i << " data differed at dim " << j;
--        EXPECT_EQ(param_copies[i]->cpu_diff()[j], params[i]->cpu_diff()[j])
-+        EXPECT_FLOAT_EQ(param_copies[i]->cpu_diff()[j], params[i]->cpu_diff()[j])
-             << "param " << i << " diff differed at dim " << j;
-       }
-     }
-@@ -569,9 +569,9 @@
-     const vector<shared_ptr<Blob<Dtype> > >& history = solver_->history();
-     for (int i = 0; i < history.size(); ++i) {
-       for (int j = 0; j < history[i]->count(); ++j) {
--        EXPECT_EQ(history_copies[i]->cpu_data()[j], history[i]->cpu_data()[j])
-+        EXPECT_FLOAT_EQ(history_copies[i]->cpu_data()[j], history[i]->cpu_data()[j])
-             << "history blob " << i << " data differed at dim " << j;
--        EXPECT_EQ(history_copies[i]->cpu_diff()[j], history[i]->cpu_diff()[j])
-+        EXPECT_FLOAT_EQ(history_copies[i]->cpu_diff()[j], history[i]->cpu_diff()[j])
-             << "history blob " << i << " diff differed at dim " << j;
-       }
-     }
---- a/src/caffe/test/test_neuron_layer.cpp
-+++ b/src/caffe/test/test_neuron_layer.cpp
-@@ -791,16 +791,16 @@
-   ip2.Backward(blob_middle_vec_2, propagate_down, blob_bottom_vec_2);
-   // Check numbers
-   for (int s = 0; s < blob_bottom_2->count(); ++s) {
--    EXPECT_EQ(this->blob_bottom_->cpu_diff()[s], blob_bottom_2->cpu_diff()[s]);
-+    EXPECT_FLOAT_EQ(this->blob_bottom_->cpu_diff()[s], blob_bottom_2->cpu_diff()[s]);
-   }
-   for (int s = 0; s < ip.blobs()[0]->count(); ++s) {
--    EXPECT_EQ(ip.blobs()[0]->cpu_diff()[s], ip2.blobs()[0]->cpu_diff()[s]);
-+    EXPECT_FLOAT_EQ(ip.blobs()[0]->cpu_diff()[s], ip2.blobs()[0]->cpu_diff()[s]);
-   }
-   for (int s = 0; s < ip.blobs()[1]->count(); ++s) {
--    EXPECT_EQ(ip.blobs()[1]->cpu_diff()[s], ip2.blobs()[1]->cpu_diff()[s]);
-+    EXPECT_FLOAT_EQ(ip.blobs()[1]->cpu_diff()[s], ip2.blobs()[1]->cpu_diff()[s]);
-   }
-   for (int s = 0; s < prelu.blobs()[0]->count(); ++s) {
--    EXPECT_EQ(prelu.blobs()[0]->cpu_diff()[s],
-+    EXPECT_FLOAT_EQ(prelu.blobs()[0]->cpu_diff()[s],
-         prelu2.blobs()[0]->cpu_diff()[s]);
-   }
- }
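
For context: the patch above was forwarded upstream as BVLC/caffe#4566 and
merged. googletest's EXPECT_EQ compares floats with operator==, which fails
when two mathematically equal computations differ only in accumulated rounding
error; EXPECT_FLOAT_EQ instead treats values as equal when they are within
4 ULPs of each other. A minimal sketch of the difference (not taken from the
Caffe test suite):

    #include <gtest/gtest.h>

    TEST(FloatCompare, AccumulatedRounding) {
      float sum = 0.0f;
      for (int i = 0; i < 10; ++i) {
        sum += 0.1f;  // each addition rounds; sum ends up 1 ULP above 1.0f
      }
      // EXPECT_EQ(sum, 1.0f);     // fails: bit-exact comparison of floats
      EXPECT_FLOAT_EQ(sum, 1.0f);  // passes: equal within 4 ULPs
    }
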
diff --git a/debian/patches/post_rc5_upstream_updates.patch b/debian/patches/post_rc5_upstream_updates.patch
deleted file mode 100644
index b822197..0000000
--- a/debian/patches/post_rc5_upstream_updates.patch
+++ /dev/null
@@ -1,174 +0,0 @@
-diff --git a/docs/tutorial/layers.md b/docs/tutorial/layers.md
-index a903d5ac..2faacc58 100644
---- a/docs/tutorial/layers.md
-+++ b/docs/tutorial/layers.md
-@@ -128,7 +128,7 @@ Layers:
- * [Infogain Loss](layers/infogainloss.html) - a generalization of MultinomialLogisticLossLayer.
- * [Softmax with Loss](layers/softmaxwithloss.html) - computes the multinomial logistic loss of the softmax of its inputs. It's conceptually identical to a softmax layer followed by a multinomial logistic loss layer, but provides a more numerically stable gradient.
- * [Sum-of-Squares / Euclidean](layers/euclideanloss.html) - computes the sum of squares of differences of its two inputs, $$\frac 1 {2N} \sum_{i=1}^N \| x^1_i - x^2_i \|_2^2$$.
--* [Hinge / Margin](layers/hiddenloss.html) - The hinge loss layer computes a one-vs-all hinge (L1) or squared hinge loss (L2).
-+* [Hinge / Margin](layers/hingeloss.html) - The hinge loss layer computes a one-vs-all hinge (L1) or squared hinge loss (L2).
- * [Sigmoid Cross-Entropy Loss](layers/sigmoidcrossentropyloss.html) - computes the cross-entropy (logistic) loss, often used for predicting targets interpreted as probabilities.
- * [Accuracy / Top-k layer](layers/accuracy.html) - scores the output as an accuracy with respect to target -- it is not actually a loss and has no backward step.
- * [Contrastive Loss](layers/contrastiveloss.html)
-diff --git a/include/caffe/util/hdf5.hpp b/include/caffe/util/hdf5.hpp
-index ce568c5e..71549c1c 100644
---- a/include/caffe/util/hdf5.hpp
-+++ b/include/caffe/util/hdf5.hpp
-@@ -13,12 +13,12 @@ namespace caffe {
- template <typename Dtype>
- void hdf5_load_nd_dataset_helper(
-     hid_t file_id, const char* dataset_name_, int min_dim, int max_dim,
--    Blob<Dtype>* blob);
-+    Blob<Dtype>* blob, bool reshape);
- 
- template <typename Dtype>
- void hdf5_load_nd_dataset(
-     hid_t file_id, const char* dataset_name_, int min_dim, int max_dim,
--    Blob<Dtype>* blob);
-+    Blob<Dtype>* blob, bool reshape = false);
- 
- template <typename Dtype>
- void hdf5_save_nd_dataset(
-diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp
-index 3589e476..be011699 100644
---- a/python/caffe/_caffe.cpp
-+++ b/python/caffe/_caffe.cpp
-@@ -288,7 +288,7 @@ void Solver_add_callback(Solver<Dtype> * solver, bp::object on_start,
- }
- 
- // Seems boost cannot call the base method directly
--void Solver_add_nccl(SGDSolver<Dtype>* solver
-+void Solver_add_nccl(Solver<Dtype>* solver
- #ifdef USE_NCCL
-   , NCCL<Dtype>* nccl
- #endif
-diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp
-index b9a071ce..00716a92 100644
---- a/src/caffe/layers/hdf5_data_layer.cpp
-+++ b/src/caffe/layers/hdf5_data_layer.cpp
-@@ -39,8 +39,9 @@ void HDF5DataLayer<Dtype>::LoadHDF5FileData(const char* filename) {
- 
-   for (int i = 0; i < top_size; ++i) {
-     hdf_blobs_[i] = shared_ptr<Blob<Dtype> >(new Blob<Dtype>());
-+    // Allow reshape here, as we are loading data not params
-     hdf5_load_nd_dataset(file_id, this->layer_param_.top(i).c_str(),
--        MIN_DATA_DIM, MAX_DATA_DIM, hdf_blobs_[i].get());
-+        MIN_DATA_DIM, MAX_DATA_DIM, hdf_blobs_[i].get(), true);
-   }
- 
-   herr_t status = H5Fclose(file_id);
-diff --git a/src/caffe/test/test_hdf5_output_layer.cpp b/src/caffe/test/test_hdf5_output_layer.cpp
-index 3833ebff..2bc2de1e 100644
---- a/src/caffe/test/test_hdf5_output_layer.cpp
-+++ b/src/caffe/test/test_hdf5_output_layer.cpp
-@@ -77,10 +77,12 @@ TYPED_TEST(HDF5OutputLayerTest, TestForward) {
-                           H5P_DEFAULT);
-   ASSERT_GE(file_id, 0)<< "Failed to open HDF5 file" <<
-       this->input_file_name_;
-+  // Allow reshape here as we are loading data not params
-+  bool reshape = true;
-   hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4,
--                       this->blob_data_);
-+                       this->blob_data_, reshape);
-   hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4,
--                       this->blob_label_);
-+                       this->blob_label_, reshape);
-   herr_t status = H5Fclose(file_id);
-   EXPECT_GE(status, 0)<< "Failed to close HDF5 file " <<
-       this->input_file_name_;
-@@ -105,12 +107,12 @@ TYPED_TEST(HDF5OutputLayerTest, TestForward) {
- 
-   Blob<Dtype>* blob_data = new Blob<Dtype>();
-   hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4,
--                       blob_data);
-+                       blob_data, reshape);
-   this->CheckBlobEqual(*(this->blob_data_), *blob_data);
- 
-   Blob<Dtype>* blob_label = new Blob<Dtype>();
-   hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4,
--                       blob_label);
-+                       blob_label, reshape);
-   this->CheckBlobEqual(*(this->blob_label_), *blob_label);
- 
-   status = H5Fclose(file_id);
-diff --git a/src/caffe/test/test_hdf5data_layer.cpp b/src/caffe/test/test_hdf5data_layer.cpp
-index 68e10286..487f5176 100644
---- a/src/caffe/test/test_hdf5data_layer.cpp
-+++ b/src/caffe/test/test_hdf5data_layer.cpp
-@@ -70,7 +70,7 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) {
-   int height = 6;
-   int width = 5;
- 
--  // Test that the layer setup got the correct parameters.
-+  // Test that the layer setup gives correct parameters.
-   HDF5DataLayer<Dtype> layer(param);
-   layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
-   EXPECT_EQ(this->blob_top_data_->num(), batch_size);
-diff --git a/src/caffe/util/hdf5.cpp b/src/caffe/util/hdf5.cpp
-index d255877b..ed737429 100644
---- a/src/caffe/util/hdf5.cpp
-+++ b/src/caffe/util/hdf5.cpp
-@@ -9,7 +9,7 @@ namespace caffe {
- template <typename Dtype>
- void hdf5_load_nd_dataset_helper(
-     hid_t file_id, const char* dataset_name_, int min_dim, int max_dim,
--    Blob<Dtype>* blob) {
-+    Blob<Dtype>* blob, bool reshape) {
-   // Verify that the dataset exists.
-   CHECK(H5LTfind_dataset(file_id, dataset_name_))
-       << "Failed to find HDF5 dataset " << dataset_name_;
-@@ -56,17 +56,38 @@ void hdf5_load_nd_dataset_helper(
-     LOG(FATAL) << "Datatype class unknown";
-   }
- 
-+
-   vector<int> blob_dims(dims.size());
-   for (int i = 0; i < dims.size(); ++i) {
-     blob_dims[i] = dims[i];
-   }
--  blob->Reshape(blob_dims);
-+
-+  if (reshape) {
-+    blob->Reshape(blob_dims);
-+  } else {
-+    if (blob_dims != blob->shape()) {
-+      // create shape string for error message
-+      ostringstream stream;
-+      int count = 1;
-+      for (int i = 0; i < blob_dims.size(); ++i) {
-+        stream << blob_dims[i] << " ";
-+        count = count * blob_dims[i];
-+      }
-+      stream << "(" << count << ")";
-+      string source_shape_string = stream.str();
-+
-+      CHECK(blob_dims == blob->shape()) << "Cannot load blob from hdf5; shape "
-+            << "mismatch. Source shape is " << source_shape_string
-+            << " target shape is " << blob->shape_string();
-+    }
-+  }
- }
- 
- template <>
- void hdf5_load_nd_dataset<float>(hid_t file_id, const char* dataset_name_,
--        int min_dim, int max_dim, Blob<float>* blob) {
--  hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob);
-+        int min_dim, int max_dim, Blob<float>* blob, bool reshape) {
-+  hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob,
-+                              reshape);
-   herr_t status = H5LTread_dataset_float(
-     file_id, dataset_name_, blob->mutable_cpu_data());
-   CHECK_GE(status, 0) << "Failed to read float dataset " << dataset_name_;
-@@ -74,8 +95,9 @@ void hdf5_load_nd_dataset<float>(hid_t file_id, const char* dataset_name_,
- 
- template <>
- void hdf5_load_nd_dataset<double>(hid_t file_id, const char* dataset_name_,
--        int min_dim, int max_dim, Blob<double>* blob) {
--  hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob);
-+        int min_dim, int max_dim, Blob<double>* blob, bool reshape) {
-+  hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob,
-+                              reshape);
-   herr_t status = H5LTread_dataset_double(
-     file_id, dataset_name_, blob->mutable_cpu_data());
-   CHECK_GE(status, 0) << "Failed to read double dataset " << dataset_name_;
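
For context: the central change in the patch above (now part of upstream
1.0.0) is the new reshape argument on the HDF5 loading helpers. With
reshape = true the target blob adopts the dataset's shape; with the default
reshape = false any shape mismatch is a fatal CHECK, so parameter blobs can
no longer be silently resized. A minimal sketch of the resulting call sites,
assuming Caffe's headers, an hid_t obtained from H5Fopen as in the tests
above, and hypothetical dataset names:

    #include "caffe/blob.hpp"
    #include "caffe/util/hdf5.hpp"

    void LoadExample(hid_t file_id, caffe::Blob<float>* data,
                     caffe::Blob<float>* weights) {
      // Data may take whatever shape the dataset has: pass reshape = true,
      // as HDF5DataLayer::LoadHDF5FileData now does.
      caffe::hdf5_load_nd_dataset(file_id, "data", 0, 4, data, true);
      // Parameters keep the default reshape = false: a mismatch aborts with
      // "Cannot load blob from hdf5; shape mismatch" instead of resizing.
      caffe::hdf5_load_nd_dataset(file_id, "weights", 0, 4, weights);
    }
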
diff --git a/debian/patches/series b/debian/patches/series
index ee52507..e6a2974 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,7 +1,4 @@
 cmake-using-basic-blas
-cmake-using-gnuinstalldirs
 cmake-fix-python-module-installdir
-fix-more-float-comparison-precision-issue
 fix-more-float-comparison-precision-issue2
 cmake-link-correct-python-lib.patch
-post_rc5_upstream_updates.patch
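
With those three patches dropped, debian/patches/series is left with four
patches, applied in this order:

    cmake-using-basic-blas
    cmake-fix-python-module-installdir
    fix-more-float-comparison-precision-issue2
    cmake-link-correct-python-lib.patch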

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/caffe-contrib.git


