Skip to content

Commit 0c59c88

Browse files
committed
Revert apache#15842 and apache#15894. These PRs should not modify CI timeouts.
1 parent cd397a3 commit 0c59c88

File tree

8 files changed

+6
-360
lines changed

8 files changed

+6
-360
lines changed

ci/jenkins/Jenkinsfile_unix_cpu

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -21,7 +21,7 @@
2121
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
2222

2323
// timeout in minutes
24-
max_time = 240
24+
max_time = 180
2525

2626
node('utility') {
2727
// Loading the utilities requires a node context unfortunately

python/mxnet/ndarray/numpy/_op.py

Lines changed: 1 addition & 51 deletions
Original file line number | Diff line number | Diff line change
@@ -27,7 +27,7 @@
2727
from ..ndarray import NDArray
2828

2929
__all__ = ['zeros', 'ones', 'add', 'subtract', 'multiply', 'divide', 'mod', 'power', 'tensordot',
30-
'linspace', 'expand_dims', 'tile', 'arange', 'split', 'concatenate', 'stack']
30+
'linspace', 'expand_dims', 'tile', 'arange', 'split']
3131

3232

3333
@set_module('mxnet.ndarray.numpy')
@@ -682,53 +682,3 @@ def split(ary, indices_or_sections, axis=0):
682682
if not isinstance(ret, list):
683683
return [ret]
684684
return ret
685-
686-
687-
@set_module('mxnet.ndarray.numpy')
688-
def concatenate(seq, axis=0, out=None):
689-
"""Join a sequence of arrays along an existing axis.
690-
Parameters
691-
----------
692-
a1, a2, ... : sequence of array_like
693-
The arrays must have the same shape, except in the dimension
694-
corresponding to `axis` (the first, by default).
695-
axis : int, optional
696-
The axis along which the arrays will be joined. If axis is None,
697-
arrays are flattened before use. Default is 0.
698-
out : ndarray, optional
699-
If provided, the destination to place the result. The shape must be
700-
correct, matching that of what concatenate would have returned if no
701-
out argument were specified.
702-
Returns
703-
-------
704-
res : ndarray
705-
The concatenated array.
706-
"""
707-
return _npi.concatenate(*seq, dim=axis, out=out)
708-
709-
710-
@set_module('mxnet.ndarray.numpy')
711-
def stack(arrays, axis=0, out=None):
712-
"""Join a sequence of arrays along a new axis.
713-
The axis parameter specifies the index of the new axis in the dimensions of the result.
714-
For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.
715-
Parameters
716-
----------
717-
arrays : sequence of array_like
718-
Each array must have the same shape.
719-
axis : int, optional
720-
The axis in the result array along which the input arrays are stacked.
721-
out : ndarray, optional
722-
If provided, the destination to place the result. The shape must be correct,
723-
matching that of what stack would have returned if no out argument were specified.
724-
Returns
725-
-------
726-
stacked : ndarray
727-
The stacked array has one more dimension than the input arrays."""
728-
def get_list(arrays):
729-
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
730-
raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
731-
return [arr for arr in arrays]
732-
733-
arrays = get_list(arrays)
734-
return _npi.stack(*arrays, axis=axis, out=out)

python/mxnet/numpy/multiarray.py

Lines changed: 1 addition & 46 deletions
Original file line number | Diff line number | Diff line change
@@ -44,8 +44,7 @@
4444
from ..ndarray.numpy import _internal as _npi
4545

4646
__all__ = ['ndarray', 'empty', 'array', 'zeros', 'ones', 'add', 'subtract', 'multiply', 'divide',
47-
'mod', 'power', 'tensordot', 'linspace', 'expand_dims', 'tile', 'arange', 'split',
48-
'concatenate', 'stack']
47+
'mod', 'power', 'tensordot', 'linspace', 'expand_dims', 'tile', 'arange', 'split']
4948

5049

5150
# This function is copied from ndarray.py since pylint
@@ -1854,47 +1853,3 @@ def split(ary, indices_or_sections, axis=0):
18541853
If `indices_or_sections` is given as an integer, but
18551854
a split does not result in equal division."""
18561855
return _mx_nd_np.split(ary, indices_or_sections, axis=axis)
1857-
1858-
1859-
@set_module('mxnet.numpy')
1860-
def concatenate(seq, axis=0, out=None):
1861-
"""Join a sequence of arrays along an existing axis.
1862-
Parameters
1863-
----------
1864-
a1, a2, ... : sequence of array_like
1865-
The arrays must have the same shape, except in the dimension
1866-
corresponding to `axis` (the first, by default).
1867-
axis : int, optional
1868-
The axis along which the arrays will be joined. If axis is None,
1869-
arrays are flattened before use. Default is 0.
1870-
out : ndarray, optional
1871-
If provided, the destination to place the result. The shape must be
1872-
correct, matching that of what concatenate would have returned if no
1873-
out argument were specified.
1874-
Returns
1875-
-------
1876-
res : ndarray
1877-
The concatenated array.
1878-
"""
1879-
return _mx_nd_np.concatenate(seq, axis=axis, out=out)
1880-
1881-
1882-
@set_module('mxnet.numpy')
1883-
def stack(arrays, axis=0, out=None):
1884-
"""Join a sequence of arrays along a new axis.
1885-
The axis parameter specifies the index of the new axis in the dimensions of the result.
1886-
For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.
1887-
Parameters
1888-
----------
1889-
arrays : sequence of array_like
1890-
Each array must have the same shape.
1891-
axis : int, optional
1892-
The axis in the result array along which the input arrays are stacked.
1893-
out : ndarray, optional
1894-
If provided, the destination to place the result. The shape must be correct,
1895-
matching that of what stack would have returned if no out argument were specified.
1896-
Returns
1897-
-------
1898-
stacked : ndarray
1899-
The stacked array has one more dimension than the input arrays."""
1900-
return _mx_nd_np.stack(arrays, axis=axis, out=out)

python/mxnet/symbol/numpy/_symbol.py

Lines changed: 1 addition & 51 deletions
Original file line number | Diff line number | Diff line change
@@ -30,7 +30,7 @@
3030
from . import _internal as _npi
3131

3232
__all__ = ['zeros', 'ones', 'add', 'subtract', 'multiply', 'divide', 'mod', 'power', 'tensordot',
33-
'linspace', 'expand_dims', 'tile', 'arange', 'split', 'concatenate', 'stack']
33+
'linspace', 'expand_dims', 'tile', 'arange', 'split']
3434

3535

3636
def _num_outputs(sym):
@@ -1312,54 +1312,4 @@ def split(ary, indices_or_sections, axis=0):
13121312
return ret
13131313

13141314

1315-
@set_module('mxnet.symbol.numpy')
1316-
def concatenate(seq, axis=0, out=None):
1317-
"""Join a sequence of arrays along an existing axis.
1318-
Parameters
1319-
----------
1320-
a1, a2, ... : sequence of array_like
1321-
The arrays must have the same shape, except in the dimension
1322-
corresponding to `axis` (the first, by default).
1323-
axis : int, optional
1324-
The axis along which the arrays will be joined. If axis is None,
1325-
arrays are flattened before use. Default is 0.
1326-
out : ndarray, optional
1327-
If provided, the destination to place the result. The shape must be
1328-
correct, matching that of what concatenate would have returned if no
1329-
out argument were specified.
1330-
Returns
1331-
-------
1332-
res : ndarray
1333-
The concatenated array.
1334-
"""
1335-
return _npi.concatenate(*seq, dim=axis, out=out)
1336-
1337-
1338-
@set_module('mxnet.symbol.numpy')
1339-
def stack(arrays, axis=0, out=None):
1340-
"""Join a sequence of arrays along a new axis.
1341-
The axis parameter specifies the index of the new axis in the dimensions of the result.
1342-
For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.
1343-
Parameters
1344-
----------
1345-
arrays : sequence of array_like
1346-
Each array must have the same shape.
1347-
axis : int, optional
1348-
The axis in the result array along which the input arrays are stacked.
1349-
out : ndarray, optional
1350-
If provided, the destination to place the result. The shape must be correct,
1351-
matching that of what stack would have returned if no out argument were specified.
1352-
Returns
1353-
-------
1354-
stacked : ndarray
1355-
The stacked array has one more dimension than the input arrays."""
1356-
def get_list(arrays):
1357-
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
1358-
raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
1359-
return [arr for arr in arrays]
1360-
1361-
arrays = get_list(arrays)
1362-
return _npi.stack(*arrays, axis=axis, out=out)
1363-
1364-
13651315
_set_np_symbol_class(_Symbol)

src/operator/numpy/np_matrix_op-inl.h

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -27,7 +27,6 @@
2727

2828
#include <vector>
2929
#include "../tensor/matrix_op-inl.h"
30-
#include "../nn/concat-inl.h"
3130

3231
namespace mxnet {
3332
namespace op {

src/operator/numpy/np_matrix_op.cc

Lines changed: 1 addition & 98 deletions
Original file line number | Diff line number | Diff line change
@@ -23,8 +23,8 @@
2323
* \brief CPU Implementation of numpy matrix operations
2424
*/
2525

26-
#include <vector>
2726
#include "./np_matrix_op-inl.h"
27+
#include "../nn/concat-inl.h"
2828

2929
namespace mxnet {
3030
namespace op {
@@ -248,102 +248,5 @@ NNVM_REGISTER_OP(_np_squeeze)
248248
.add_argument("a", "NDArray-or-Symbol[]", "data to squeeze")
249249
.add_arguments(SqueezeParam::__FIELDS__());
250250

251-
bool ConcatShape(const nnvm::NodeAttrs& attrs,
252-
mxnet::ShapeVector *in_shape,
253-
mxnet::ShapeVector *out_shape);
254-
255-
bool ConcatType(const nnvm::NodeAttrs& attrs,
256-
std::vector<int> *in_type,
257-
std::vector<int> *out_type);
258-
259-
struct NumpyConcatGrad {
260-
const char *op_name;
261-
std::vector<nnvm::NodeEntry> operator()(const nnvm::NodePtr& n,
262-
const std::vector<nnvm::NodeEntry>& ograds) const {
263-
CHECK_EQ(ograds.size(), 1);
264-
std::vector<nnvm::NodeEntry> heads(ograds.begin(), ograds.end());
265-
return MakeGradNode(op_name, n, heads, n->attrs.dict);
266-
}
267-
};
268-
269-
270-
NNVM_REGISTER_OP(_npi_concatenate)
271-
.describe(R"code(Join a sequence of arrays along an existing axis.)code" ADD_FILELINE)
272-
.set_num_inputs([](const NodeAttrs& attrs) {
273-
const ConcatParam& params = nnvm::get<ConcatParam>(attrs.parsed);
274-
return params.num_args;
275-
})
276-
.set_num_outputs(1)
277-
.set_attr_parser(ParamParser<ConcatParam>)
278-
.set_attr<nnvm::FListInputNames>("FListInputNames",
279-
[](const NodeAttrs& attrs) {
280-
const ConcatParam& params = nnvm::get<ConcatParam>(attrs.parsed);
281-
std::vector<std::string> ret;
282-
for (int i = 0; i < params.num_args; ++i) {
283-
ret.push_back(std::string("data") + std::to_string(i));
284-
}
285-
return ret;
286-
})
287-
.set_attr<nnvm::FListOutputNames>("FListOutputNames",
288-
[](const NodeAttrs& attrs) {
289-
return std::vector<std::string>{"out"};
290-
})
291-
.set_attr<std::string>("key_var_num_args", "num_args")
292-
.set_attr<nnvm::FInferType>("FInferType", ConcatType)
293-
.set_attr<mxnet::FInferShape>("FInferShape", ConcatShape)
294-
.set_attr<FCompute>("FCompute<cpu>", ConcatCompute<cpu>)
295-
.set_attr<nnvm::FGradient>("FGradient", NumpyConcatGrad{"_backward_np_concat"})
296-
.add_argument("data", "NDArray-or-Symbol[]", "List of arrays to concatenate")
297-
.add_arguments(ConcatParam::__FIELDS__());
298-
299-
NNVM_REGISTER_OP(_backward_np_concat)
300-
.set_num_outputs([](const NodeAttrs& attrs) {
301-
const ConcatParam& params = nnvm::get<ConcatParam>(attrs.parsed);
302-
return params.num_args;
303-
})
304-
.set_attr_parser(ParamParser<ConcatParam>)
305-
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
306-
.set_attr<FCompute>("FCompute<cpu>", ConcatGradCompute<cpu>);
307-
308-
NNVM_REGISTER_OP(_npi_stack)
309-
.describe(R"code(Join a sequence of arrays along a new axis.
310-
311-
The axis parameter specifies the index of the new axis in the dimensions of the
312-
result. For example, if axis=0 it will be the first dimension and if axis=-1 it
313-
will be the last dimension.
314-
315-
Examples::
316-
317-
x = [1, 2]
318-
y = [3, 4]
319-
320-
stack(x, y) = [[1, 2],
321-
[3, 4]]
322-
stack(x, y, axis=1) = [[1, 3],
323-
[2, 4]]
324-
)code")
325-
.set_num_inputs([](const nnvm::NodeAttrs& attrs) {
326-
const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
327-
return static_cast<uint32_t>(param.num_args);
328-
})
329-
.set_num_outputs(1)
330-
.set_attr_parser(ParamParser<StackParam>)
331-
.set_attr<nnvm::FListInputNames>("FListInputNames",
332-
[](const NodeAttrs& attrs) {
333-
uint32_t num_args = dmlc::get<StackParam>(attrs.parsed).num_args;
334-
std::vector<std::string> ret;
335-
for (uint32_t i = 0; i < num_args; ++i) {
336-
ret.push_back(std::string("arg") + std::to_string(i));
337-
}
338-
return ret;
339-
})
340-
.set_attr<std::string>("key_var_num_args", "num_args")
341-
.set_attr<mxnet::FInferShape>("FInferShape", StackOpShape)
342-
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<-1, 1>)
343-
.set_attr<FCompute>("FCompute<cpu>", StackOpForward<cpu>)
344-
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{"_backward_stack"})
345-
.add_argument("data", "NDArray-or-Symbol[]", "List of arrays to stack")
346-
.add_arguments(StackParam::__FIELDS__());
347-
348251
} // namespace op
349252
} // namespace mxnet

src/operator/numpy/np_matrix_op.cu

Lines changed: 1 addition & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -22,8 +22,8 @@
2222
* \file np_matrix_op.cu
2323
* \brief GPU Implementation of numpy matrix operations
2424
*/
25-
2625
#include "./np_matrix_op-inl.h"
26+
#include "../nn/concat-inl.h"
2727

2828
namespace mxnet {
2929
namespace op {
@@ -37,14 +37,5 @@ NNVM_REGISTER_OP(_np_reshape)
3737
NNVM_REGISTER_OP(_np_squeeze)
3838
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
3939

40-
NNVM_REGISTER_OP(_npi_concatenate)
41-
.set_attr<FCompute>("FCompute<gpu>", ConcatCompute<gpu>);
42-
43-
NNVM_REGISTER_OP(_backward_np_concat)
44-
.set_attr<FCompute>("FCompute<gpu>", ConcatGradCompute<gpu>);
45-
46-
NNVM_REGISTER_OP(_npi_stack)
47-
.set_attr<FCompute>("FCompute<gpu>", StackOpForward<gpu>);
48-
4940
} // namespace op
5041
} // namespace mxnet

0 commit comments

Comments (0)