[lua-torch-nn] 01/04: patch: add patch fix-spelling-errors

Zhou Mo cdluminate-guest at moszumanska.debian.org
Sat Aug 13 15:41:24 UTC 2016


This is an automated email from the git hooks/post-receive script.

cdluminate-guest pushed a commit to branch master
in repository lua-torch-nn.

commit 857312fc819fc54440b40d49b1585c278ce2826a
Author: Zhou Mo <cdluminate at gmail.com>
Date:   Sat Aug 13 15:34:21 2016 +0000

    patch: add patch fix-spelling-errors
---
 debian/patches/fix-spelling-errors | 157 +++++++++++++++++++++++++++++++++++++
 debian/patches/series              |   1 +
 2 files changed, 158 insertions(+)

diff --git a/debian/patches/fix-spelling-errors b/debian/patches/fix-spelling-errors
new file mode 100644
index 0000000..58a5268
--- /dev/null
+++ b/debian/patches/fix-spelling-errors
@@ -0,0 +1,157 @@
+diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
+index d4da7c9..92574db 100644
+--- a/CONTRIBUTING.md
++++ b/CONTRIBUTING.md
+@@ -22,7 +22,7 @@ restrictions:
+   [mailing-list](http://groups.google.com/forum/#!forum/torch7)).
+ 
+ * Please **do not** open issues regarding the code in a torch package 
+-  outside the core. For example dont open issues about the 
++  outside the core. For example don't open issues about the 
+   REPL in the nn issue tracker, use the trepl issue tracker for that.
+ 
+ <a name="bugs"></a>
+diff --git a/ClassSimplexCriterion.lua b/ClassSimplexCriterion.lua
+index 6ccaed9..9cabc01 100644
+--- a/ClassSimplexCriterion.lua
++++ b/ClassSimplexCriterion.lua
+@@ -64,7 +64,7 @@ function ClassSimplexCriterion:__init(nClasses)
+ end
+ 
+ -- handle target being both 1D tensor, and
+--- target being 2D tensor (2D tensor means dont do anything)
++-- target being 2D tensor (2D tensor means don't do anything)
+ local function transformTarget(self, target)
+     if torch.type(target) == 'number' then
+         self._target:resize(self.nClasses)
+diff --git a/Container.lua b/Container.lua
+index 6af4d7d..469a370 100644
+--- a/Container.lua
++++ b/Container.lua
+@@ -22,7 +22,7 @@ end
+ 
+ -- Check if passing arguments through xpcall is supported in this Lua interpreter.
+ local _, XPCALL_ARGS = xpcall(function(x) return x ~= nil end, function() end, 1)
+-local TRACEBACK_WARNING = "WARNING: If you see a stack trace below, it doesn't point to the place where this error occured. Please use only the one above."
++local TRACEBACK_WARNING = "WARNING: If you see a stack trace below, it doesn't point to the place where this error occurred. Please use only the one above."
+ -- module argument can be retrieved with moduleIndex, but code is cleaner when
+ -- it has to be specified anyway.
+ function Container:rethrowErrors(module, moduleIndex, funcName, ...)
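
For context, the hunk above sits next to a feature probe for xpcall: Lua 5.2+
(and LuaJIT with 5.2 extensions) forwards extra xpcall arguments to the called
function, while plain Lua 5.1 does not. A minimal standalone sketch of the same
probe, illustrative only and not part of the patch:

    -- The inner function sees x == 1 on interpreters that forward xpcall
    -- arguments (Lua 5.2+), but x == nil on Lua 5.1.
    local ok, forwards = xpcall(
       function(x) return x ~= nil end,   -- probe function
       function(err) return err end,      -- message handler (unused here)
       1)
    print(ok, forwards)   -- true, true on 5.2+; true, false on 5.1
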
+diff --git a/LookupTable.lua b/LookupTable.lua
+index 8a60354..cf9c687 100644
+--- a/LookupTable.lua
++++ b/LookupTable.lua
+@@ -125,7 +125,7 @@ function LookupTable:renorm(input)
+    if not self.maxNorm then
+       return
+    end
+-   -- copy input into _input, so _input is continous.
++   -- copy input into _input, so _input is continuous.
+    -- The copied _input will be modified in the C code.
+    self._input:resize(input:size()):copy(input)
+    local row_idx = self._input
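
The comment fixed above concerns making a tensor contiguous before handing it
to C code. A rough illustration of why the resize-and-copy idiom is used
(assumes a standard Torch7 install; not part of the patch):

    require 'torch'
    local t = torch.rand(4, 4):t()           -- transposed view: non-contiguous
    print(t:isContiguous())                  -- false
    local buf = torch.Tensor()
    buf:resize(t:size()):copy(t)             -- same idiom as LookupTable:renorm
    print(buf:isContiguous())                -- true: safe to mutate in C
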
+diff --git a/SpatialDropout.lua b/SpatialDropout.lua
+index 35daa18..99cd0fc 100644
+--- a/SpatialDropout.lua
++++ b/SpatialDropout.lua
+@@ -19,7 +19,7 @@ function SpatialDropout:updateOutput(input)
+       end
+       self.noise:bernoulli(1-self.p)
+       -- We expand the random dropouts to the entire feature map because the
+-      -- features are likely correlated accross the map and so the dropout
++      -- features are likely correlated across the map and so the dropout
+       -- should also be correlated.
+       self.output:cmul(torch.expandAs(self.noise, input))
+    else
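
For context on the comment being corrected: SpatialDropout draws one Bernoulli
sample per feature map and expands it over the spatial dimensions, so each map
is kept or dropped as a whole. A minimal sketch of that expand trick (shapes
are made up; not part of the patch):

    require 'torch'
    local p     = 0.5
    local input = torch.rand(4, 16, 8, 8)    -- batch x maps x H x W
    local noise = torch.Tensor(4, 16, 1, 1):bernoulli(1 - p)
    -- expandAs repeats each draw over H and W via zero strides, so a whole
    -- feature map is either kept or zeroed together:
    local output = input:clone():cmul(torch.expandAs(noise, input))
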
+diff --git a/Sum.lua b/Sum.lua
+index 5d61c28..9ff73f8 100644
+--- a/Sum.lua
++++ b/Sum.lua
+@@ -36,8 +36,8 @@ end
+ 
+ function Sum:updateGradInput(input, gradOutput)
+     local dimension = self:_getPositiveDimension(input)
+-    -- zero-strides dont work with MKL/BLAS, so
+-    -- dont set self.gradInput to zero-stride tensor.
++    -- zero-strides don't work with MKL/BLAS, so
++    -- don't set self.gradInput to zero-stride tensor.
+     -- Instead, do a deepcopy
+     local size      = input:size()
+     size[dimension] = 1
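
The two comment lines fixed here describe a real constraint: an expanded
tensor repeats data through zero strides, which some MKL/BLAS kernels do not
accept, hence the deep copy. A small demonstration (illustrative, not part of
the patch):

    require 'torch'
    local g = torch.rand(1, 5)                    -- gradOutput with summed dim
    local e = g:expand(3, 5)                      -- rows alias the same storage
    print(e:stride(1))                            -- 0: a zero-stride tensor
    local gradInput = torch.Tensor(3, 5):copy(e)  -- deep copy, real strides
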
+diff --git a/VolumetricDropout.lua b/VolumetricDropout.lua
+index 5f495af..1be85b1 100644
+--- a/VolumetricDropout.lua
++++ b/VolumetricDropout.lua
+@@ -19,7 +19,7 @@ function VolumetricDropout:updateOutput(input)
+       end
+       self.noise:bernoulli(1-self.p)
+       -- We expand the random dropouts to the entire feature map because the
+-      -- features are likely correlated accross the map and so the dropout
++      -- features are likely correlated across the map and so the dropout
+       -- should also be correlated.
+       self.output:cmul(torch.expandAs(self.noise, input))
+    else
+diff --git a/doc/simple.md b/doc/simple.md
+index 6f01a56..2d94465 100644
+--- a/doc/simple.md
++++ b/doc/simple.md
+@@ -598,7 +598,7 @@ end
+ module = nn.Copy(inputType, outputType, [forceCopy, dontCast])
+ ```
+ 
+-This layer copies the input to output with type casting from `inputType` to `outputType`. Unless `forceCopy` is true, when the first two arguments are the same, the input isn't copied, only transfered as the output. The default `forceCopy` is false.
++This layer copies the input to output with type casting from `inputType` to `outputType`. Unless `forceCopy` is true, when the first two arguments are the same, the input isn't copied, only transferred as the output. The default `forceCopy` is false.
+ When `dontCast` is true, a call to `nn.Copy:type(type)` will not cast the module's `output` and `gradInput` Tensors to the new type. The default is false.
+ 
+ <a name="nn.Narrow"></a>
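
Since the hunk above edits the nn.Copy documentation, a short usage sketch of
the module as that documentation describes it (assumes torch and nn are
installed; not part of the patch):

    require 'nn'
    -- Cast float input to double on the way through:
    local m   = nn.Copy('torch.FloatTensor', 'torch.DoubleTensor')
    local out = m:forward(torch.FloatTensor(2, 3):fill(1))
    print(torch.type(out))                   -- torch.DoubleTensor
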
+@@ -1432,10 +1432,10 @@ gpustr = torch.serialize(gpu)
+ ``` 
+ 
+ The module is located in the __nn__ package instead of __cunn__ as this allows
+-it to be used in CPU-only enviroments, which are common for production models.
++it to be used in CPU-only environments, which are common for production models.
+ 
+ The module supports nested table `input` and `gradOutput` tensors originating from multiple devices.
+-Each nested tensor in the returned `gradInput` will be transfered to the device its commensurate tensor in the `input`.
++Each nested tensor in the returned `gradInput` will be transferred to the device its commensurate tensor in the `input`.
+ 
+ The intended use-case is not for model-parallelism where the models are executed in parallel on multiple devices, but 
+ for sequential models where a single GPU doesn't have enough memory. 
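
The documentation touched here describes serializing a GPU-wrapped module so
it can be shipped to CPU-only environments. The serialization calls it relies
on work for any nn module; a minimal round-trip sketch, using a plain
nn.Linear so it runs without a GPU (not part of the patch):

    require 'nn'
    local m   = nn.Linear(3, 2)
    local str = torch.serialize(m)            -- same call as the doc snippet
    local m2  = torch.deserialize(str)
    print(torch.type(m2))                     -- nn.Linear
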
+diff --git a/lib/THNN/generic/SpatialUpSamplingNearest.c b/lib/THNN/generic/SpatialUpSamplingNearest.c
+index 7ef093c..b67c68d 100644
+--- a/lib/THNN/generic/SpatialUpSamplingNearest.c
++++ b/lib/THNN/generic/SpatialUpSamplingNearest.c
+@@ -14,7 +14,7 @@ void THNN_(SpatialUpSamplingNearest_updateOutput)(
+   int yDim = input->nDimension-1;
+ 
+   // dims
+-  int idim = input->nDimension;  // Gauranteed to be between 3 and 5
++  int idim = input->nDimension;  // Guaranteed to be between 3 and 5
+   int osz0 = output->size[0];
+   int osz1 = output->size[1];
+   int osz2 = output->size[2];
+@@ -80,7 +80,7 @@ void THNN_(SpatialUpSamplingNearest_updateGradInput)(
+   int yDim = gradInput->nDimension-1;
+ 
+   // dims
+-  int idim = gradInput->nDimension;  // Gauranteed to be between 3 and 5
++  int idim = gradInput->nDimension;  // Guaranteed to be between 3 and 5
+   int isz0 = gradInput->size[0];
+   int isz1 = gradInput->size[1];
+   int isz2 = gradInput->size[2];
+diff --git a/test.lua b/test.lua
+index e288e25..fa16c47 100644
+--- a/test.lua
++++ b/test.lua
+@@ -6306,9 +6306,9 @@ function nntest.addSingletonDimension()
+    local resultArg = torch.Tensor()
+    local resultR = nn.utils.addSingletonDimension(resultArg, tensor, dim)
+    mytester:eq(resultArg:size():totable(), resultSize,
+-               'wrong content for random singleton dimention '..
++               'wrong content for random singleton dimension '..
+                'when the result is passed as argument')
+-   mytester:eq(resultArg, result, 'wrong content for random singleton dimention '..
++   mytester:eq(resultArg, result, 'wrong content for random singleton dimension '..
+                'when the result is passed as argument')
+ 
+    mytester:eq(resultR == resultArg, true,
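
The test being fixed exercises nn.utils.addSingletonDimension, which inserts a
size-1 dimension into a tensor's shape. A brief usage sketch of the two-argument
form (illustrative, not part of the patch):

    require 'nn'
    local t = torch.rand(3, 4)
    local r = nn.utils.addSingletonDimension(t, 2)  -- insert size-1 dim at 2
    print(r:size())                                 -- 3x1x4, shares t's storage
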
diff --git a/debian/patches/series b/debian/patches/series
index 5022a13..babcb03 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,3 +1,4 @@
 THNN-cmake-add-soversion
 THNN-assume-torch-is-present
 cmake-only-generate-lua
+fix-spelling-errors

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/lua-torch-nn.git


