author    Ace <tinyshine@yeah.net>  2018-04-18 21:44:08 +0800
committer Soumith Chintala <soumith@gmail.com>  2018-04-18 09:44:08 -0400
commit    2a628ba32f3e71f1fc6b31383e46e9b09db9abd6 (patch)
tree      97cc1dace9f875320c4bfd383db2b327edb7133c /aten
parent    bd0cc7d3649473fe1b38f4867cbfbd40149c81f4 (diff)
Update README.md (#6703)
Diffstat (limited to 'aten')
-rw-r--r--  aten/README.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/aten/README.md b/aten/README.md
index 27fff09fb6..64b395917f 100644
--- a/aten/README.md
+++ b/aten/README.md
@@ -60,7 +60,7 @@ Here is a simple example; again, the syntax follows Torch semantics.
 using namespace at; // assumed in the following
 Tensor d = CPU(kFloat).ones({3, 4});
-Tensor r = CPU(kFloat).zeros({3,4})
+Tensor r = CPU(kFloat).zeros({3,4});
 for(auto i = 0; i < 100000; i++) {
   r = r.add(d);
   // equivalently
@@ -75,7 +75,7 @@ Want this running on the GPU?
 using namespace at; // assumed in the following
 Tensor d = CUDA(kFloat).ones({3, 4});
-Tensor r = CUDA(kFloat).zeros({3,4})
+Tensor r = CUDA(kFloat).zeros({3,4});
 for(auto i = 0; i < 100000; i++) {
   r = r.add(d);
   // equivalently
@@ -208,7 +208,7 @@ to the CPU, this would result in 2 copies. To avoid these synchronizations, Scalars are
 optionally backed by a zero-dim Tensor, and are only copied to the CPU when requested.
 ```c++
-auto a = CUDA(kFloat).rand({3,4})
+auto a = CUDA(kFloat).rand({3,4});
 Scalar on_gpu = Scalar(a[1][1]); //backed by zero-dim Tensor
 assert(on_gpu.isBackedByTensor());
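
For context, a minimal sketch of the corrected Scalar example as a self-contained program. The includes, main() wrapper, and comments are assumptions added for illustration; the ATen calls themselves (CUDA(kFloat).rand, Scalar, isBackedByTensor) come straight from the patched README, and the sketch presumes a CUDA-enabled build of the standalone ATen library of this era:

```c++
// Sketch only: assumes a CUDA-enabled ATen build and the old standalone
// ATen API that this README documents (pre-dating the torch:: namespace).
#include <ATen/ATen.h>
#include <cassert>

int main() {
  using namespace at;
  auto a = CUDA(kFloat).rand({3,4});  // the statement this patch terminates with ';'
  // Wrapping an element keeps the value on the GPU as a zero-dim Tensor;
  // nothing is copied to the CPU at this point.
  Scalar on_gpu = Scalar(a[1][1]);
  assert(on_gpu.isBackedByTensor());
  return 0;
}
```

As the surrounding README text explains, the value is only synchronized to the CPU when the Scalar is actually requested, so wrapping the element avoids an eager device-to-host copy.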