
Commit 207f3bc

Merge pull request #69 from iotamudelta/master
Merge from upstream

2 parents: 6c4068e + e64bed0

File tree

181 files changed: +11182 / -8645 lines


.jenkins/pytorch/test.sh

Lines changed: 3 additions & 6 deletions

@@ -44,13 +44,10 @@ if [[ "$BUILD_ENVIRONMENT" == *asan* ]]; then
   (cd test && ! get_exit_code python -c "import torch; torch._C._crash_if_aten_asan(3)")
 fi
 
-export ATEN_DISABLE_AVX=
-export ATEN_DISABLE_AVX2=
 if [[ "${JOB_BASE_NAME}" == *-NO_AVX-* ]]; then
-  export ATEN_DISABLE_AVX=1
-fi
-if [[ "${JOB_BASE_NAME}" == *-NO_AVX2-* ]]; then
-  export ATEN_DISABLE_AVX2=1
+  export ATEN_CPU_CAPABILITY=default
+elif [[ "${JOB_BASE_NAME}" == *-NO_AVX2-* ]]; then
+  export ATEN_CPU_CAPABILITY=avx
 fi
 
 test_python_nn() {
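
Note: the NO_AVX / NO_AVX2 CI jobs now steer kernel selection through a single ATEN_CPU_CAPABILITY variable instead of the two ATEN_DISABLE_* flags. The following is only an illustrative, self-contained sketch of env-var-driven kernel selection in that spirit; it is not ATen's dispatcher, and the kernel names are made up.

// cap_dispatch_sketch.cpp -- hypothetical illustration, not ATen code
#include <cstdlib>
#include <cstring>
#include <cstdio>

static void add_default(const float* a, const float* b, float* out, int n) {
  for (int i = 0; i < n; ++i) out[i] = a[i] + b[i];  // scalar fallback path
}

static void add_avx(const float* a, const float* b, float* out, int n) {
  // Stand-in for a vectorized kernel; same semantics as the fallback.
  for (int i = 0; i < n; ++i) out[i] = a[i] + b[i];
}

int main() {
  const char* cap = std::getenv("ATEN_CPU_CAPABILITY");
  auto kernel = &add_avx;                        // pretend the host supports AVX
  if (cap && std::strcmp(cap, "default") == 0)   // forced scalar path, as in the NO_AVX job
    kernel = &add_default;
  float a[4] = {1, 2, 3, 4}, b[4] = {4, 3, 2, 1}, out[4];
  kernel(a, b, out, 4);
  std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
  return 0;
}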

aten/src/ATen/Layout.h

Lines changed: 14 additions & 0 deletions

@@ -1,6 +1,9 @@
 #pragma once
 
 #include <ATen/ScalarType.h>
+#include <ATen/Error.h>
+
+#include <iostream>
 
 namespace at {
 enum class Layout { Strided, Sparse };
@@ -18,3 +21,14 @@ inline Layout layout_from_backend(Backend backend) {
   }
 }
 } // namespace at
+
+inline std::ostream& operator<<(std::ostream& stream, at::Layout layout) {
+  switch (layout) {
+    case at::kStrided:
+      return stream << "Strided";
+    case at::kSparse:
+      return stream << "Sparse";
+    default:
+      AT_ERROR("Unknown layout");
+  }
+}
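
For reference, a hedged usage sketch of the new stream operator (assumes an ATen build where <ATen/Layout.h> is on the include path; this snippet is not part of the commit):

#include <ATen/Layout.h>
#include <iostream>

int main() {
  at::Layout layout = at::Layout::Sparse;
  // Expected to print "Strided / Sparse" via the operator<< added above.
  std::cout << at::kStrided << " / " << layout << std::endl;
  return 0;
}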

aten/src/ATen/ScalarType.h

Lines changed: 9 additions & 2 deletions

@@ -1,11 +1,12 @@
 #pragma once
 
-#include <stdint.h>
-
 #include "ATen/ArrayRef.h"
 #include "ATen/ATenGeneral.h"
 #include "ATen/Half.h"
 
+#include <cstdint>
+#include <iostream>
+
 namespace at {
 
 // NB: Order matters for this macro; it is relied upon in
@@ -168,3 +169,9 @@ typedef ArrayRef<int64_t> IntList;
 typedef ArrayRef<Tensor> TensorList;
 
 } // namespace at
+
+inline std::ostream& operator<<(
+    std::ostream& stream,
+    at::ScalarType scalar_type) {
+  return stream << at::toString(scalar_type);
+}
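
Likewise, a small usage sketch for the ScalarType printer (assumes an ATen build; the output text comes from at::toString, so it is expected to be the type's name, e.g. "Float"):

#include <ATen/ScalarType.h>
#include <iostream>

int main() {
  // Should print the scalar type's name via at::toString.
  std::cout << at::ScalarType::Float << std::endl;
  return 0;
}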

aten/src/ATen/SparseTensorImpl.cpp

Lines changed: 1 addition & 1 deletion

@@ -19,7 +19,7 @@ namespace at {
 // we don't currently support zero-size dimensions, so we can't actually
 // do this; so we just allocate zero-size tensors for everything.
 SparseTensorImpl::SparseTensorImpl(Type * type)
-  : TensorImpl(type)
+  : TensorImpl(type, nullptr)
   , size_{0}
   , sparseDims_(1)
   , denseDims_(0)

aten/src/ATen/TensorImpl.cpp

Lines changed: 40 additions & 0 deletions

@@ -3,6 +3,8 @@
 #include <ATen/Tensor.h>
 #include <ATen/optional.h>
 
+#include <TH/THTensor.hpp>
+
 namespace at {
 Tensor& TensorImpl::grad() {
   AT_ERROR("grad is not implemented for Tensor");
@@ -33,4 +35,42 @@ void Tensor::backward(
     bool create_graph) {
   pImpl->backward(std::move(gradient), keep_graph, create_graph);
 }
+
+TensorImpl::~TensorImpl() {
+  if (tensor) tensor->release();
+}
+
+IntList TensorImpl::sizes() const {
+  // NB: dim in tensor is not synchronized with THTensor, so it's
+  // important to apply dim here
+  return IntList(THTensor_getSizePtr(tensor), dim());
+}
+
+IntList TensorImpl::strides() const {
+  // NB: dim in tensor is not synchronized with THTensor, so it's
+  // important to apply dim here
+  return IntList(THTensor_getStridePtr(tensor), dim());
+}
+
+void TensorImpl::release_resources() {
+  if (tensor) {
+    tensor->release();
+    tensor = nullptr;
+  }
+}
+
+int64_t TensorImpl::dim() const {
+  if (isScalar()) {
+    return 0;
+  }
+  return tensor->dim();
+}
+
+void * TensorImpl::unsafeGetTH(bool retain) {
+  if (retain) {
+    tensor->retain();
+  }
+  return tensor;
+}
+
 } // namespace at
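
TensorImpl now owns a raw THTensor* and manages it with retain/release. The following is not ATen code, just a self-contained sketch of that ownership pattern under hypothetical Refcounted/Wrapper types:

#include <atomic>
#include <cstdio>

struct Refcounted {
  std::atomic<int> refcount{1};
  void retain() { ++refcount; }
  void release() {
    if (--refcount == 0) { std::puts("freed"); delete this; }
  }
};

struct Wrapper {
  Refcounted* tensor;
  explicit Wrapper(Refcounted* t) : tensor(t) {}
  ~Wrapper() { if (tensor) tensor->release(); }   // like TensorImpl::~TensorImpl
  void release_resources() {                      // like TensorImpl::release_resources
    if (tensor) { tensor->release(); tensor = nullptr; }
  }
  void* unsafeGet(bool retain) {                  // like unsafeGetTH(true)
    if (retain) tensor->retain();
    return tensor;
  }
};

int main() {
  Wrapper w(new Refcounted());
  auto* raw = static_cast<Refcounted*>(w.unsafeGet(/*retain=*/true));
  w.release_resources();  // drops the wrapper's reference; raw still holds one
  raw->release();         // final release frees the object
  return 0;
}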

aten/src/ATen/TensorImpl.h

Lines changed: 23 additions & 15 deletions

@@ -7,6 +7,8 @@
 #include "ATen/ScalarType.h"
 #include "ATen/optional.h"
 
+struct THTensor;
+
 namespace at {
 class Scalar;
 struct Type;
@@ -15,23 +17,27 @@ struct Tensor;
 } // namespace at
 
 namespace at {
-struct TensorImpl : public Retainable {
-  explicit TensorImpl(Type * type)
-  : is_scalar(false), type_(type) {}
+struct AT_API TensorImpl : public Retainable {
+  explicit TensorImpl(Type * type, THTensor * tensor)
+  : is_scalar(false), type_(type), tensor(tensor) {}
+
+  virtual ~TensorImpl();
+
+  virtual void release_resources() override;
 
   Type & type() const {
     return *type_;
   }
   virtual const char * toString() const = 0;
-  virtual IntList sizes() const = 0;
-  virtual IntList strides() const = 0;
-  virtual int64_t dim() const = 0;
+  virtual IntList sizes() const;
+  virtual IntList strides() const;
+  virtual int64_t dim() const;
   /**
   * Perform a conversion of this tensor to a scalar, if numel() == 1.
   * Otherwise, raise an error.
   */
   virtual Scalar localScalar() = 0;
-  virtual void * unsafeGetTH(bool retain) = 0;
+  virtual void * unsafeGetTH(bool retain);
   virtual std::unique_ptr<Storage> storage() = 0;
   friend struct Type;
 
@@ -69,30 +75,32 @@ struct TensorImpl : public Retainable {
   // Some methods below are defined in TensorImpl.cpp because Tensor is an
   // incomplete type.
 
-  AT_API virtual void set_requires_grad(bool requires_grad) {
+  virtual void set_requires_grad(bool requires_grad) {
    AT_ERROR("set_requires_grad is not implemented for Tensor");
  }
-  AT_API virtual bool requires_grad() const {
+  virtual bool requires_grad() const {
    AT_ERROR("requires_grad is not implemented for Tensor");
  }
 
-  AT_API virtual Tensor& grad();
-  AT_API virtual const Tensor& grad() const;
+  virtual Tensor& grad();
+  virtual const Tensor& grad() const;
 
-  AT_API virtual Tensor detach() const;
-  AT_API virtual void detach_() {
+  virtual Tensor detach() const;
+  virtual void detach_() {
    AT_ERROR("detach_ is not implemented for Tensor");
  }
 
-  AT_API virtual void backward(
+  virtual void backward(
      at::optional<Tensor> gradient,
      bool keep_graph,
      bool create_graph);
 
-  AT_API virtual void set_data(Tensor new_data);
+  virtual void set_data(Tensor new_data);
 
 protected:
  bool is_scalar;
  Type * type_;
+public:
+  THTensor * tensor;
 };
 } // namespace at

aten/src/ATen/TensorOptions.cpp

Lines changed: 12 additions & 0 deletions

@@ -6,6 +6,8 @@
 #include <ATen/ScalarType.h>
 #include <ATen/optional.h>
 
+#include <iostream>
+
 namespace at {
 
 TensorOptions::TensorOptions(bool use_thread_local_default_options) {
@@ -17,3 +19,13 @@ TensorOptions::TensorOptions(bool use_thread_local_default_options) {
   }
 }
 } // namespace at
+
+std::ostream& operator<<(
+    std::ostream& stream,
+    const at::TensorOptions& options) {
+  return stream << "TensorOptions(dtype=" << options.dtype()
+                << ", device=" << options.device()
+                << ", layout=" << options.layout()
+                << ", requires_grad=" << std::boolalpha
+                << options.requires_grad() << ")";
+}
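
A hedged sketch of how the new printer might be used. It assumes a TensorOptions instance obtained elsewhere (e.g. from a Tensor's options()); the exact field text, such as the device string, depends on the corresponding operator<< overloads, so the comment only indicates the expected shape of the output:

#include <ATen/TensorOptions.h>
#include <iostream>

void dump(const at::TensorOptions& options) {
  // Prints something like:
  //   TensorOptions(dtype=Float, device=cpu, layout=Strided, requires_grad=false)
  std::cout << options << std::endl;
}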

aten/src/ATen/TensorOptions.h

Lines changed: 5 additions & 0 deletions

@@ -9,6 +9,7 @@
 #include <ATen/Type.h>
 
 #include <cstddef>
+#include <iosfwd>
 #include <utility>
 
 namespace at {
@@ -277,3 +278,7 @@ inline Tensor Tensor::to(Device device, bool non_blocking) const {
   return detail::to(*this, options().device(device), non_blocking);
 }
 } // namespace at
+
+std::ostream& operator<<(
+    std::ostream& stream,
+    const at::TensorOptions& options);

aten/src/ATen/UndefinedTensor.cpp

Lines changed: 1 addition & 1 deletion

@@ -6,7 +6,7 @@ namespace at {
 
 // should this use the globalContext? Can it get a context passed in somehow?
 UndefinedTensor::UndefinedTensor()
-: TensorImpl(&(globalContext().getType(Backend::Undefined,ScalarType::Undefined))) {
+: TensorImpl(&(globalContext().getType(Backend::Undefined,ScalarType::Undefined)), nullptr) {
 }
 
 const char * UndefinedTensor::toString() const {

aten/src/ATen/code_template.py

Lines changed: 4 additions & 2 deletions

@@ -50,15 +50,17 @@ def replace(match):
                 comma_after = ', '
                 key = key[:-1]
             v = lookup(key)
-            if indent is not None and isinstance(v, list):
+            if indent is not None:
+                if not isinstance(v, list):
+                    v = [v]
                 return indent_lines(indent, v)
             elif isinstance(v, list):
                 middle = ', '.join([str(x) for x in v])
                 if len(v) == 0:
                     return middle
                 return comma_before + middle + comma_after
             else:
-                return (indent or '') + str(v)
+                return str(v)
         return self.subtitution.sub(replace, self.pattern)
aten/src/ATen/copy_wrapper.py

Lines changed: 2 additions & 2 deletions

@@ -116,7 +116,7 @@ def create_one_copy(dst_type, all_types):
     cuda = ''
     state = []
     if src_type['Backend'] == 'CUDA' or dst_type['Backend'] == 'CUDA':
-        state.append('context->getTHCState()')
+        state.append('globalContext().getTHCState()')
     if src_type['Backend'] == 'CUDA':
         if dst_type['Backend'] == 'CUDA':
             cuda = 'Cuda'
@@ -183,7 +183,7 @@ def create_one_copy_from(src_type, all_types):
     if src_type['Backend'] == 'CUDA':
         cuda = 'Cuda'
     if dst_type['Backend'] == 'CUDA' or src_type['Backend'] == 'CUDA':
-        state.append('context->getTHCState()')
+        state.append('globalContext().getTHCState()')
 
     body_env = nested_dict({
         'src_scalar_name': src_type['ScalarName'],

aten/src/ATen/function_wrapper.py

Lines changed: 24 additions & 11 deletions

@@ -180,7 +180,7 @@ def TypedDict(name, attrs, total=True):  # type: ignore
 }""")
 
 BUFFER_DEFINITION = CodeTemplate("""\
-auto ${name}_ = new ${Tensor}(context);
+auto ${name}_ = new ${Tensor}(${THTensor}_new());
 auto ${name} = Tensor(${name}_, false);""")
 
 CONDITIONAL_INITIALIZER = CodeTemplate("""\
@@ -277,7 +277,7 @@ def __init__(self, reason):
     'THStorage*': CodeTemplate('checked_cast_storage<${Storage}>(&${arg_name},"${arg_name}",${arg_pos})'),
     'THGenerator*':
         CodeTemplate(
-            'check_generator<${Backend}Generator>(${arg_name}, &context->defaultGenerator(backend()))'),
+            'check_generator<${Backend}Generator>(${arg_name}, &globalContext().defaultGenerator(backend()))'),
     # This is a cast done via direct-construction
     'THSize*': CodeTemplate('THLongStorageView ${result_name}(${arg_name}, THLongStorageViewKind::SIZE);'),
     # This is a cast done via direct-construction
@@ -306,14 +306,24 @@ def __init__(self, reason):
 
 CHECKED_USE_NULLABLE = CodeTemplate('${arg_name}_ ? ${usage} : NULL')
 
+ALLOC_NOARGS_WRAP = {
+    'THTensor*': 'detail::new_${Tensor}()',
+    'THBoolTensor*': 'detail::new_${Backend}ByteTensor()',
+    'THIndexTensor*': 'detail::new_${Backend}LongTensor()',
+    'THIntegerTensor*': 'detail::new_${Backend}IntTensor()',
+    'THSTensor*': 'detail::new_Sparse${Tensor}()',
+    'THDenseTensor*': 'detail::new_${DenseTensor}()',
+    'THDenseIndexTensor*': 'detail::new_${DenseBackend}LongTensor()',
+}
+
 ALLOC_WRAP = {
-    'THTensor*': 'new ${Tensor}(context${,arguments})',
-    'THBoolTensor*': 'new ${Backend}ByteTensor(context${,arguments})',
-    'THIndexTensor*': 'new ${Backend}LongTensor(context${,arguments})',
-    'THIntegerTensor*': 'new ${Backend}IntTensor(context${,arguments})',
-    'THSTensor*': 'new Sparse${Tensor}(context${,arguments})',
-    'THDenseTensor*': 'new ${DenseTensor}(context${,arguments})',
-    'THDenseIndexTensor*': 'new ${DenseBackend}LongTensor(context${,arguments})',
+    'THTensor*': 'new ${Tensor}(${arguments})',
+    'THBoolTensor*': 'new ${Backend}ByteTensor(${arguments})',
+    'THIndexTensor*': 'new ${Backend}LongTensor(${arguments})',
+    'THIntegerTensor*': 'new ${Backend}IntTensor(${arguments})',
+    'THSTensor*': 'new Sparse${Tensor}(${arguments})',
+    'THDenseTensor*': 'new ${DenseTensor}(${arguments})',
+    'THDenseIndexTensor*': 'new ${DenseBackend}LongTensor(${arguments})',
 }
 
 # Replacements for constants when calling into TH
@@ -1228,7 +1238,10 @@ def handle_sparse(env, option):
 def allocate_arg(env, arg, output_count):
     # type: (Environment, THFormal, int) -> List[str]
     name = arg['name']
-    allocation = CodeTemplate(ALLOC_WRAP[arg['type']]).substitute(env, arguments=[])
+    state = ''
+    if is_cuda:
+        state = 'globalContext().getTHCState()'
+    allocation = CodeTemplate(ALLOC_NOARGS_WRAP[arg['type']]).substitute(env)
     tensor_arg = '{}_'.format(name)
     if arg.get('mask', False):
         allocation = 'output_mask[{}] ? {} : nullptr'.format(output_count, allocation)
@@ -1257,7 +1270,7 @@ def handle_call(env, option, cimpl):
     is_nn = option['mode'] == 'NN'
     actuals = get_arguments(cimpl['arguments'], option)
     if is_cuda or is_nn:
-        actuals = ['context->getTHCState()'] + actuals
+        actuals = ['globalContext().getTHCState()'] + actuals
 
     cname = cimpl['cname']
     if option.get('sparse', False):

aten/src/ATen/gen.py

Lines changed: 1 addition & 1 deletion

@@ -273,7 +273,7 @@ def generate_storage_type_and_tensor(backend, density, scalar_type, declarations
         env['THStorage'] = 'THCuda{}Storage'.format(sname)
         env['THTensor'] = 'THCuda{}Tensor'.format(sname)
         env['THIndexTensor'] = 'THCudaLongTensor'
-        env['state'] = ['context->getTHCState()']
+        env['state'] = ['globalContext().getTHCState()']
         env['isCUDA'] = 'true'
         env['storage_device'] = 'return storage->device;'
         env['Generator'] = 'CUDAGenerator'
