提交 18782015 编写于 作者: J Juncheng 提交者: GitHub

Remove broadcast/reduce/reshape system op (#3199)

* Remove reduce/broadcast system op

* fix GenBroadcastToCompatibleWithGradOpConf

* remove useless file

* Remove reshape system op

* revert dynamic reshape grad

* revert DynamicReshapeKernel

* revert DynamicReshapeLikeKernel

Former-commit-id: 94cf6a11
上级 1fd7ddbf
#include "oneflow/core/job_completer/autograd.h"
#include "oneflow/core/common/shape_view.h"
#include "oneflow/core/framework/framework.h"
namespace oneflow {
......@@ -22,17 +23,16 @@ Maybe<void> GenBroadcastToCompatibleWithGradOpConf(
CHECK_EQ(x_extend_shape.At(i), y_shape.At(i));
}
}
OperatorConf reduce_sum_like_op;
reduce_sum_like_op.set_name("System-AutoGrad-" + op.op_name());
ReduceSumLikeOpConf* conf = reduce_sum_like_op.mutable_reduce_sum_like_conf();
conf->set_x(GenLogicalBlobName(*DiffLbi4BnInOp("y")));
conf->set_like(GenLogicalBlobName(op.BnInOp2Lbi("x")));
conf->set_y("y");
*conf->mutable_axis() = StdVec2PbRf(reduced_axes);
op_confs->push_back(reduce_sum_like_op);
DiffLbi4BnInOp("x")->set_op_name(reduce_sum_like_op.name());
DiffLbi4BnInOp("x")->set_blob_name(conf->y());
const auto reduce_sum_like_op =
user_op::UserOpConfWrapperBuilder("System-AutoGrad-" + op.op_name())
.Op("reduce_sum_like")
.Input("x", GenLogicalBlobName(*DiffLbi4BnInOp("y")))
.Input("like", GenLogicalBlobName(op.BnInOp2Lbi("x")))
.Attr<std::vector<int32_t>>("axis", reduced_axes)
.Output("y")
.Build();
op_confs->push_back(reduce_sum_like_op.op_conf());
*DiffLbi4BnInOp("x") = GenLogicalBlobId(reduce_sum_like_op.output("y", 0));
}
return Maybe<void>::Ok();
}
......
......@@ -4,23 +4,6 @@ namespace oneflow {
namespace {
// Generates the backward op for the (system) Reshape op: the gradient of a
// reshape is the out-diff reshaped back to the input's shape, expressed with a
// ReshapeLike op ("x" = out-diff, "like" = the forward input).
void GenerateBackwardOpConf4Reshape(
    const Operator& op, std::vector<OperatorConf>* op_confs,
    const std::function<LogicalBlobId*(const std::string&)>& DiffLbi4BnInOp) {
  CHECK(op.op_conf().has_reshape_conf());
  // A null diff lbi means no gradient is required for "in".
  if (DiffLbi4BnInOp("in") != nullptr) {
    OperatorConf reverse_reshape_op;
    reverse_reshape_op.set_name(op.op_name() + "_grad");
    ReshapeLikeOpConf* reshape_like_op_conf = reverse_reshape_op.mutable_reshape_like_conf();
    reshape_like_op_conf->set_x(GenLogicalBlobName(*DiffLbi4BnInOp("out")));
    reshape_like_op_conf->set_like(GenLogicalBlobName(op.BnInOp2Lbi("in")));
    reshape_like_op_conf->set_y("y");
    op_confs->push_back(reverse_reshape_op);
    // Point the in-diff at the newly created grad op's output blob "y".
    DiffLbi4BnInOp("in")->set_op_name(reverse_reshape_op.name());
    DiffLbi4BnInOp("in")->set_blob_name("y");
  }
}
void GenerateBackwardOpConf4DynamicReshape(
const Operator& op, std::vector<OperatorConf>* op_confs,
const std::function<LogicalBlobId*(const std::string&)>& DiffLbi4BnInOp) {
......@@ -41,7 +24,6 @@ void GenerateBackwardOpConf4DynamicReshape(
} // namespace
REGISTER_OP_GRAD(OperatorConf::kReshapeConf, GenerateBackwardOpConf4Reshape);
REGISTER_OP_GRAD(OperatorConf::kDynamicReshapeConf, GenerateBackwardOpConf4DynamicReshape);
} // namespace oneflow
#include "oneflow/core/job_completer/autograd.h"
namespace oneflow {
namespace {
// Generates the backward op for the (system) ReduceSum op: the gradient is the
// out-diff broadcast back up to the input's shape along the reduced axes,
// expressed with a BroadcastLike op ("x" = out-diff, "like" = forward input).
void GenerateBackwardOpConf(
    const Operator& op, std::vector<OperatorConf>* op_confs,
    const std::function<LogicalBlobId*(const std::string&)>& DiffLbi4BnInOp) {
  CHECK(op.op_conf().has_reduce_sum_conf());
  // A null diff lbi means no gradient is required for "in".
  if (DiffLbi4BnInOp("in") != nullptr) {
    OperatorConf broadcast_like_op;
    broadcast_like_op.set_name(op.op_name() + "_grad");
    BroadcastLikeOpConf* broadcast_like_op_conf = broadcast_like_op.mutable_broadcast_like_conf();
    broadcast_like_op_conf->set_x(GenLogicalBlobName(*DiffLbi4BnInOp("out")));
    broadcast_like_op_conf->set_like(GenLogicalBlobName(op.BnInOp2Lbi("in")));
    broadcast_like_op_conf->set_y("y");
    // Restore exactly the axes the forward reduce_sum collapsed.
    const ReduceSumOpConf& reduce_sum_op_conf = op.op_conf().reduce_sum_conf();
    broadcast_like_op_conf->mutable_reduced_axis()->CopyFrom(reduce_sum_op_conf.axis());
    op_confs->push_back(broadcast_like_op);
    // Point the in-diff at the newly created grad op's output blob "y".
    DiffLbi4BnInOp("in")->set_op_name(broadcast_like_op.name());
    DiffLbi4BnInOp("in")->set_blob_name("y");
  }
}
} // namespace
REGISTER_OP_GRAD(OperatorConf::kReduceSumConf, &GenerateBackwardOpConf);
} // namespace oneflow
#include "oneflow/core/kernel/broadcast_like_kernel.h"
#include "oneflow/core/ndarray/ndarray_util.h"
namespace oneflow {
namespace {
// Broadcasts "x" up to the shape of "y" (which mirrors "like"). The input is
// first viewed as the like-shape with the reduced axes restored as size-1
// dims, then expanded by NdarrayUtil::BroadcastTo.
template<DeviceType device_type, typename T>
void BroadcastLikeKernel<device_type, T>::ForwardDataContent(
    const KernelCtx& ctx, std::function<Blob*(const std::string&)> BnInOp2Blob) const {
  const BroadcastLikeOpConf& conf = this->op_conf().broadcast_like_conf();
  CHECK(!conf.reduced_axis().empty());
  const Blob* input = BnInOp2Blob("x");
  const Blob* like = BnInOp2Blob("like");
  Blob* output = BnInOp2Blob("y");
  const Shape& src_view_shape = CreateReducedShapeOrOnesShape(
      like->shape(), {conf.reduced_axis().begin(), conf.reduced_axis().end()});
  NdarrayUtil<device_type, T>::BroadcastTo(
      ctx.device_ctx, XpuVarNdarray<T>(output, output->shape().NumAxes()),
      XpuVarNdarray<const T>(src_view_shape, input->dptr<T>()));
}
ADD_DEFAULT_KERNEL_CREATOR(OperatorConf::kBroadcastLikeConf, BroadcastLikeKernel,
FLOATING_DATA_TYPE_SEQ);
} // namespace
} // namespace oneflow
#ifndef ONEFLOW_CORE_KERNEL_BROADCAST_LIKE_KERNEL_H_
#define ONEFLOW_CORE_KERNEL_BROADCAST_LIKE_KERNEL_H_
#include "oneflow/core/kernel/kernel.h"
namespace oneflow {
namespace {
// Kernel that broadcasts input "x" up to the shape of the "like" blob.
// NOTE(review): this class sits inside an anonymous namespace in a header
// (see surrounding lines) — every includer gets a private copy; confirm this
// is intentional before reuse.
template<DeviceType device_type, typename T>
class BroadcastLikeKernel final : public KernelIf<device_type> {
 public:
  OF_DISALLOW_COPY_AND_MOVE(BroadcastLikeKernel);
  BroadcastLikeKernel() = default;
  ~BroadcastLikeKernel() = default;

 private:
  // Implemented in the corresponding .cpp; performs the broadcast copy.
  void ForwardDataContent(const KernelCtx&,
                          std::function<Blob*(const std::string&)>) const override;
};
} // namespace
} // namespace oneflow
#endif // ONEFLOW_CORE_KERNEL_BROADCAST_LIKE_KERNEL_H_
#include "oneflow/core/kernel/reshape_kernel.h"
#include "oneflow/core/kernel/kernel.h"
namespace oneflow {
template<DeviceType device_type>
void ReshapeKernel<device_type>::ForwardDataContent(
// Kernel for the DynamicReshape op. The forward pass is a plain data copy:
// a reshape never rearranges memory, only reinterprets the shape.
class DynamicReshapeKernel final : public KernelIf<device_type> {
 public:
  OF_DISALLOW_COPY_AND_MOVE(DynamicReshapeKernel);
  DynamicReshapeKernel() = default;
  ~DynamicReshapeKernel() override = default;

 private:
  void ForwardDataContent(const KernelCtx&,
                          std::function<Blob*(const std::string&)>) const override;
};
// Dynamic reshape forward: the output is a bitwise copy of the input; only
// the shape metadata differs.
template<DeviceType device_type>
void DynamicReshapeKernel<device_type>::ForwardDataContent(
    const KernelCtx& ctx, std::function<Blob*(const std::string&)> BnInOp2Blob) const {
  Blob* dst = BnInOp2Blob("out");
  const Blob* src = BnInOp2Blob("in");
  dst->CopyDataContentFrom(ctx.device_ctx, src);
}
ADD_DEVICE_TYPE_KERNEL_CREATOR(OperatorConf::kReshapeConf, ReshapeKernel);
ADD_DEVICE_TYPE_KERNEL_CREATOR(OperatorConf::kDynamicReshapeConf, ReshapeKernel);
ADD_DEVICE_TYPE_KERNEL_CREATOR(OperatorConf::kDynamicReshapeConf, DynamicReshapeKernel);
} // namespace oneflow
#include "oneflow/core/kernel/reshape_like_kernel.h"
#include "oneflow/core/kernel/kernel.h"
namespace oneflow {
template<DeviceType device_type>
void ReshapeLikeKernel<device_type>::ForwardDataContent(
// Kernel for the DynamicReshapeLike op. Like DynamicReshape, the forward pass
// is a plain data copy; the output shape comes from the "like" blob.
class DynamicReshapeLikeKernel final : public KernelIf<device_type> {
 public:
  OF_DISALLOW_COPY_AND_MOVE(DynamicReshapeLikeKernel);
  DynamicReshapeLikeKernel() = default;
  ~DynamicReshapeLikeKernel() override = default;

 private:
  void ForwardDataContent(const KernelCtx&,
                          std::function<Blob*(const std::string&)>) const override;
};
// Dynamic reshape-like forward: bitwise copy of "x" into "y"; shape metadata
// is taken care of by blob-desc inference, not here.
template<DeviceType device_type>
void DynamicReshapeLikeKernel<device_type>::ForwardDataContent(
    const KernelCtx& ctx, std::function<Blob*(const std::string&)> BnInOp2Blob) const {
  Blob* dst = BnInOp2Blob("y");
  const Blob* src = BnInOp2Blob("x");
  dst->CopyDataContentFrom(ctx.device_ctx, src);
}
ADD_DEVICE_TYPE_KERNEL_CREATOR(OperatorConf::kReshapeLikeConf, ReshapeLikeKernel);
ADD_DEVICE_TYPE_KERNEL_CREATOR(OperatorConf::kDynamicReshapeLikeConf, ReshapeLikeKernel);
ADD_DEVICE_TYPE_KERNEL_CREATOR(OperatorConf::kDynamicReshapeLikeConf, DynamicReshapeLikeKernel);
} // namespace oneflow
......@@ -11,10 +11,6 @@ import "oneflow/core/register/tensor_slice_view.proto";
import "oneflow/core/job/sbp_parallel.proto";
import "oneflow/core/register/blob_desc.proto";
// Kernel-time conf for ReduceSum.
message ReduceSumKernelConf {
  // Input shape with the reduced axes kept as size-1 dims ("kept dims");
  // presumably used to build the reduction views — confirm against the kernel.
  required ShapeProto kept_dims_shape = 1;
}
// Kernel-time conf for DecodeRandom.
message DecodeRandomKernelConf {
  // Seed used by the kernel's random generator for reproducible decoding.
  required uint32 random_seed = 1;
}
......@@ -122,7 +118,6 @@ message KernelConf {
UserKernelConf user_conf = 100;
DecodeRandomKernelConf decode_random_conf = 103;
DecodeOFRecordKernelConf decode_ofrecord_conf = 104;
ReduceSumKernelConf reduce_sum_conf = 120;
ReduceGatherKernelConf reduce_gather_conf = 350;
XrtLaunchKernelConf xrt_launch_conf = 353;
UniqueWithCountsKernelConf unique_with_counts_conf = 354;
......
#include "oneflow/core/kernel/reduce_sum_kernel.h"
#include "oneflow/core/ndarray/ndarray_util.h"
namespace oneflow {
namespace {
// Sums "in" over the configured axes into "out". The destination is viewed as
// the input shape with the reduced axes collapsed to 1; "fw_tmp" is scratch
// space for the reduction.
template<DeviceType device_type, typename T>
void ReduceSumKernel<device_type, T>::ForwardDataContent(
    const KernelCtx& ctx, std::function<Blob*(const std::string&)> BnInOp2Blob) const {
  const ReduceSumOpConf& conf = this->op_conf().reduce_sum_conf();
  CHECK(!conf.axis().empty());
  const Blob* input = BnInOp2Blob("in");
  Blob* output = BnInOp2Blob("out");
  Blob* scratch = BnInOp2Blob("fw_tmp");
  const int64_t ndims = input->shape().NumAxes();
  const Shape& dst_view_shape =
      CreateReducedShape(input->shape(), {conf.axis().begin(), conf.axis().end()});
  NdarrayUtil<device_type, T>::ReduceSum(
      ctx.device_ctx, XpuVarNdarray<T>(dst_view_shape, output->mut_dptr<T>()),
      XpuVarNdarray<const T>(input, ndims), XpuVarNdarray<T>(scratch, ndims));
}
ADD_DEFAULT_KERNEL_CREATOR_WITH_GPU_HALF(OperatorConf::kReduceSumConf, ReduceSumKernel,
ARITHMETIC_DATA_TYPE_SEQ);
} // namespace
} // namespace oneflow
#ifndef ONEFLOW_CORE_KERNEL_REDUCE_SUM_KERNEL_H_
#define ONEFLOW_CORE_KERNEL_REDUCE_SUM_KERNEL_H_
#include "oneflow/core/kernel/kernel.h"
namespace oneflow {
namespace {
// Kernel that reduce-sums its input over the axes given in ReduceSumOpConf.
// NOTE(review): declared in an anonymous namespace in a header (see the
// surrounding lines) — each includer gets a private copy; confirm intent.
template<DeviceType device_type, typename T>
class ReduceSumKernel final : public KernelIf<device_type> {
 public:
  OF_DISALLOW_COPY_AND_MOVE(ReduceSumKernel);
  ReduceSumKernel() = default;
  ~ReduceSumKernel() = default;

 private:
  void ForwardDataContent(const KernelCtx& ctx,
                          std::function<Blob*(const std::string&)> BnInOp2Blob) const override;
};
} // namespace
} // namespace oneflow
#endif // ONEFLOW_CORE_KERNEL_REDUCE_SUM_KERNEL_H_
#include "oneflow/core/kernel/reduce_sum_like_kernel.h"
#include "oneflow/core/ndarray/ndarray_util.h"
namespace oneflow {
// Reduce-sums "x" down to the shape of "y" (which mirrors "like") over the
// configured axes. With an empty axis list the op degenerates to a copy.
template<DeviceType device_type, typename T>
void ReduceSumLikeKernel<device_type, T>::ForwardDataContent(
    const KernelCtx& ctx, std::function<Blob*(const std::string&)> BnInOp2Blob) const {
  const Blob* x_blob = BnInOp2Blob("x");
  Blob* y_blob = BnInOp2Blob("y");
  Blob* temp_storage_blob = BnInOp2Blob("temp_storage");
  const ReduceSumLikeOpConf& conf = this->op_conf().reduce_sum_like_conf();
  if (conf.axis().empty()) {
    // Nothing to reduce: shapes must already agree; copy straight through.
    CHECK_EQ(x_blob->shape(), y_blob->shape());
    y_blob->CopyDataContentFrom(ctx.device_ctx, x_blob);
  } else {
    // View y as x's shape with the reduced axes collapsed to 1 and let
    // NdarrayUtil perform the reduction; temp_storage is scratch space.
    NdarrayUtil<device_type, T>::ReduceSum(
        ctx.device_ctx,
        XpuVarNdarray<T>(
            CreateReducedShape(x_blob->shape(), {conf.axis().begin(), conf.axis().end()}),
            y_blob->mut_dptr<T>()),
        XpuVarNdarray<const T>(x_blob, x_blob->shape().NumAxes()),
        XpuVarNdarray<T>(temp_storage_blob, x_blob->shape().NumAxes()));
  }
}
ADD_DEFAULT_KERNEL_CREATOR(OperatorConf::kReduceSumLikeConf, ReduceSumLikeKernel,
ARITHMETIC_DATA_TYPE_SEQ);
} // namespace oneflow
#ifndef ONEFLOW_CORE_KERNEL_REDUCE_SUM_LIKE_KERNEL_H_
#define ONEFLOW_CORE_KERNEL_REDUCE_SUM_LIKE_KERNEL_H_
#include "oneflow/core/kernel/kernel.h"
namespace oneflow {
// Kernel that reduce-sums "x" down to the shape of a "like" blob over the
// axes given in ReduceSumLikeOpConf.
template<DeviceType device_type, typename T>
class ReduceSumLikeKernel final : public KernelIf<device_type> {
 public:
  OF_DISALLOW_COPY_AND_MOVE(ReduceSumLikeKernel);
  ReduceSumLikeKernel() = default;
  ~ReduceSumLikeKernel() = default;

 private:
  void ForwardDataContent(const KernelCtx& ctx,
                          std::function<Blob*(const std::string&)> BnInOp2Blob) const override;
};
} // namespace oneflow
#endif // ONEFLOW_CORE_KERNEL_REDUCE_SUM_LIKE_KERNEL_H_
#ifndef ONEFLOW_CORE_KERNEL_RESHAPE_KERNEL_H_
#define ONEFLOW_CORE_KERNEL_RESHAPE_KERNEL_H_
#include "oneflow/core/kernel/kernel.h"
#include "oneflow/core/kernel/kernel_context.h"
namespace oneflow {
// Kernel for the (system) Reshape op; untemplated on dtype since reshape is a
// raw data copy / reinterpretation and never touches element values.
template<DeviceType device_type>
class ReshapeKernel final : public KernelIf<device_type> {
 public:
  OF_DISALLOW_COPY_AND_MOVE(ReshapeKernel);
  ReshapeKernel() = default;
  ~ReshapeKernel() = default;

 private:
  void ForwardDataContent(const KernelCtx&,
                          std::function<Blob*(const std::string&)>) const override;
};
} // namespace oneflow
#endif // ONEFLOW_CORE_KERNEL_RESHAPE_KERNEL_H_
#ifndef ONEFLOW_CORE_KERNEL_RESHAPE_LIKE_KERNEL_H_
#define ONEFLOW_CORE_KERNEL_RESHAPE_LIKE_KERNEL_H_
#include "oneflow/core/kernel/kernel.h"
#include "oneflow/core/kernel/kernel_context.h"
namespace oneflow {
// Kernel for the (system) ReshapeLike op; like Reshape, it is a raw data copy
// whose output shape is taken from the "like" blob's meta.
template<DeviceType device_type>
class ReshapeLikeKernel final : public KernelIf<device_type> {
 public:
  OF_DISALLOW_COPY_AND_MOVE(ReshapeLikeKernel);
  ReshapeLikeKernel() = default;
  ~ReshapeLikeKernel() = default;

 private:
  void ForwardDataContent(const KernelCtx&,
                          std::function<Blob*(const std::string&)>) const override;
};
} // namespace oneflow
#endif // ONEFLOW_CORE_KERNEL_RESHAPE_LIKE_KERNEL_H_
#include "oneflow/core/operator/broadcast_like_op.h"
#include "oneflow/core/operator/reduce_sbp_util.h"
#include "oneflow/core/job/sbp_signature_builder.h"
namespace oneflow {
// Declares this op's blobs: data input "x", shape-only input "like" (marked
// header-only: only its meta, not its data, is consumed), and output "y".
void BroadcastLikeOp::InitFromOpConf() {
  EnrollInputBn("x");
  EnrollInputBn("like", false)->set_use_header_only(true);
  EnrollOutputBn("y");
}
// Returns this op's sub-message of the OperatorConf oneof.
const PbMessage& BroadcastLikeOp::GetCustomizedConf() const {
  const BroadcastLikeOpConf& customized = op_conf().broadcast_like_conf();
  return customized;
}
// Output "y" mirrors the meta (shape, dtype, ...) of the "like" input.
Maybe<void> BroadcastLikeOp::InferBlobDescs(
    std::function<BlobDesc*(const std::string&)> GetBlobDesc4BnInOp,
    const ParallelContext* parallel_ctx) const {
  const BlobDesc* like_desc = GetBlobDesc4BnInOp("like");
  BlobDesc* y_desc = GetBlobDesc4BnInOp("y");
  y_desc->CopyMetaFrom(*like_desc);
  return Maybe<void>::Ok();
}
// Enumerates one SBP signature per axis of "like":
//  - reduced axis i: "x" has no data along i, so it is broadcast while "like"
//    and the outputs are split on i;
//  - kept axis i: all inputs and outputs are split on i.
Maybe<void> BroadcastLikeOp::GetSbpSignatures(
    const std::function<Maybe<const BlobDesc*>(const std::string&)>& LogicalBlobDesc4Ibn,
    SbpSignatureList* sbp_sig_list) const {
  int32_t num_axes = JUST(LogicalBlobDesc4Ibn("like"))->shape().NumAxes();
  auto IsReducedAxis = ReduceSbpUtil::MakePredicatorIsReducedAxis(
      op_conf().broadcast_like_conf().reduced_axis(), num_axes);
  FOR_RANGE(int64_t, i, 0, num_axes) {
    if (IsReducedAxis(i)) {
      SbpSignatureBuilder()
          .Broadcast("x")
          .Split("like", i)
          .Split(output_bns(), i)
          .Build(sbp_sig_list->mutable_sbp_signature()->Add());
    } else {
      SbpSignatureBuilder()
          .Split(input_bns(), i)
          .Split(output_bns(), i)
          .Build(sbp_sig_list->mutable_sbp_signature()->Add());
    }
  }
  return Maybe<void>::Ok();
}
REGISTER_OP(OperatorConf::kBroadcastLikeConf, BroadcastLikeOp);
} // namespace oneflow
#ifndef ONEFLOW_CORE_OPERATOR_BROADCAST_LIKE_OP_H_
#define ONEFLOW_CORE_OPERATOR_BROADCAST_LIKE_OP_H_
#include "oneflow/core/operator/operator.h"
namespace oneflow {
// System op that broadcasts input "x" up to the shape of "like" along the
// conf's reduced_axis list; used as the gradient of ReduceSum.
class BroadcastLikeOp final : public Operator {
 public:
  OF_DISALLOW_COPY_AND_MOVE(BroadcastLikeOp);
  BroadcastLikeOp() = default;
  ~BroadcastLikeOp() = default;

  void InitFromOpConf() override;
  const PbMessage& GetCustomizedConf() const override;
  Maybe<void> InferBlobDescs(std::function<BlobDesc*(const std::string&)> GetBlobDesc4BnInOp,
                             const ParallelContext* parallel_ctx) const override;

 private:
  // Output inherits the batch axis of the "like" input.
  Maybe<void> InferBatchAxis(
      std::function<OptInt64*(const std::string&)> BatchAxis4BnInOp) const override {
    *BatchAxis4BnInOp("y") = *BatchAxis4BnInOp("like");
    return Maybe<void>::Ok();
  }
  Maybe<void> GetSbpSignatures(
      const std::function<Maybe<const BlobDesc*>(const std::string&)>& LogicalBlobDesc4Ibn,
      SbpSignatureList* sbp_sig_list) const override;
};
} // namespace oneflow
#endif // ONEFLOW_CORE_OPERATOR_BROADCAST_LIKE_OP_H_
......@@ -445,33 +445,6 @@ message LazyAdamModelUpdateOpConf {
message AccumulateOpConf {
}
// Conf for the (system) ReduceSum op: sums "in" over "axis" into "out".
message ReduceSumOpConf {
  required string in = 1;
  required string out = 3;
  // Axes to reduce; the op checks this is non-empty at shape inference.
  repeated int32 axis = 4;
  // When true, reduced dimensions are kept with size 1 in the output shape.
  optional bool keep_dims = 5 [default = false];
}
// Conf for ReduceSumLike: reduce-sums "x" over "axis" down to the shape of
// "like", producing "y".
message ReduceSumLikeOpConf {
  required string x = 1;
  required string like = 2;
  required string y = 3;
  // Axes to reduce; when empty, x and like must have identical shapes.
  repeated int32 axis = 4;
  // Name of the scratch blob used by the reduction kernel.
  optional string temp_storage = 5 [default = "temp_storage"];
}
// Conf for the (system) Reshape op: reinterprets "in" as "shape".
message ReshapeOpConf {
  required string in = 1;
  required string out = 2;
  // Target logical shape; element count must match the input's.
  required ShapeProto shape = 3;
}
// Conf for ReshapeLike: reinterprets "x" with the shape of "like" into "y".
message ReshapeLikeOpConf {
  required string x = 1;
  required string y = 2;
  required string like = 3;
}
message DynamicReshapeOpConf {
required string in = 1;
required string out = 2;
......@@ -696,13 +669,6 @@ message RepeatOpConf {
required int32 repeat_num = 3;
}
// Conf for BroadcastLike: broadcasts "x" up to the shape of "like" along
// "reduced_axis" into "y"; the gradient counterpart of ReduceSum.
message BroadcastLikeOpConf {
  required string x = 1;
  required string like = 2;
  required string y = 3;
  // Axes along which x was reduced and must be re-expanded.
  repeated int32 reduced_axis = 4;
}
message ConstantOpConf {
optional string tick = 1;
required string out = 2;
......@@ -1165,9 +1131,7 @@ message OperatorConf {
// domain op
TupleIdentityOpConf tuple_identity_conf = 200;
ReshapeOpConf reshape_conf = 202;
DynamicReshapeOpConf dynamic_reshape_conf = 203;
ReduceSumOpConf reduce_sum_conf = 219;
AddOpConf add_conf = 220;
PackOpConf pack_conf = 237;
UnpackOpConf unpack_conf = 238;
......@@ -1175,9 +1139,6 @@ message OperatorConf {
ConstantOpConf constant_conf = 255;
SigmoidCrossEntropyOpConf sigmoid_cross_entropy_conf = 257;
PReluOpConf prelu_conf = 265;
ReduceSumLikeOpConf reduce_sum_like_conf = 270;
BroadcastLikeOpConf broadcast_like_conf = 277;
ReshapeLikeOpConf reshape_like_conf = 286;
DynamicReshapeLikeOpConf dynamic_reshape_like_conf = 287;
IdentityOpConf identity_conf = 290;
CaseOpConf case_conf = 291;
......
#include "oneflow/core/operator/reduce_sum_like_op.h"
#include "oneflow/core/operator/reduce_sbp_util.h"
#include "oneflow/core/kernel/kernel_util.h"
#include "oneflow/core/job/sbp_signature_builder.h"
namespace oneflow {
namespace {}
// Declares blobs: data input "x", shape-only input "like" (header-only),
// output "y", and a temporary blob used as reduction scratch space.
void ReduceSumLikeOp::InitFromOpConf() {
  CHECK(op_conf().has_reduce_sum_like_conf());
  EnrollInputBn("x");
  EnrollInputBn("like", false)->set_use_header_only(true);
  EnrollOutputBn("y");
  EnrollTmpBn("temp_storage");
}
// Returns this op's sub-message of the OperatorConf oneof.
const PbMessage& ReduceSumLikeOp::GetCustomizedConf() const {
  const ReduceSumLikeOpConf& customized = op_conf().reduce_sum_like_conf();
  return customized;
}
// "y" copies its meta from "like"; "temp_storage" mirrors "x" as scratch for
// the reduction kernel. With no axes configured the op is a plain copy, so x
// and like must already have equal shapes.
Maybe<void> ReduceSumLikeOp::InferBlobDescs(
    std::function<BlobDesc*(const std::string&)> GetBlobDesc4BnInOp, const ParallelContext*) const {
  const ReduceSumLikeOpConf& conf = op_conf().reduce_sum_like_conf();
  BlobDesc* x_blob = GetBlobDesc4BnInOp("x");
  BlobDesc* like_blob = GetBlobDesc4BnInOp("like");
  if (conf.axis().empty()) { CHECK_EQ_OR_RETURN(x_blob->shape(), like_blob->shape()); }
  *GetBlobDesc4BnInOp("temp_storage") = *x_blob;
  GetBlobDesc4BnInOp("y")->CopyMetaFrom(*like_blob);
  return Maybe<void>::Ok();
}
// Both the output and the scratch blob inherit the batch axis of "like".
Maybe<void> ReduceSumLikeOp::InferBatchAxis(
    std::function<OptInt64*(const std::string&)> BatchAxis4BnInOp) const {
  const OptInt64& like_axis = *BatchAxis4BnInOp("like");
  *BatchAxis4BnInOp("y") = like_axis;
  *BatchAxis4BnInOp("temp_storage") = like_axis;
  return Maybe<void>::Ok();
}
// Enumerates one SBP signature per axis of "x":
//  - reduced axis i: splitting x along i yields partial sums, so "like" is
//    broadcast and the outputs are partial-sum;
//  - kept axis i: all inputs and outputs are split on i.
Maybe<void> ReduceSumLikeOp::GetSbpSignatures(
    const std::function<Maybe<const BlobDesc*>(const std::string&)>& LogicalBlobDesc4Ibn,
    SbpSignatureList* sbp_sig_list) const {
  int32_t num_axes = JUST(LogicalBlobDesc4Ibn("x"))->shape().NumAxes();
  auto IsReducedAxis =
      ReduceSbpUtil::MakePredicatorIsReducedAxis(op_conf().reduce_sum_like_conf().axis(), num_axes);
  FOR_RANGE(int64_t, i, 0, num_axes) {
    if (IsReducedAxis(i)) {
      SbpSignatureBuilder()
          .Split("x", i)
          .Broadcast("like")
          .PartialSum(output_bns())
          .Build(sbp_sig_list->mutable_sbp_signature()->Add());
    } else {
      SbpSignatureBuilder()
          .Split(input_bns(), i)
          .Split(output_bns(), i)
          .Build(sbp_sig_list->mutable_sbp_signature()->Add());
    }
  }
  return Maybe<void>::Ok();
}
REGISTER_OP(OperatorConf::kReduceSumLikeConf, ReduceSumLikeOp);
} // namespace oneflow
#ifndef ONEFLOW_CORE_OPERATOR_REDUCE_SUM_LIKE_OP_H_
#define ONEFLOW_CORE_OPERATOR_REDUCE_SUM_LIKE_OP_H_
#include "oneflow/core/operator/operator.h"
namespace oneflow {
// System op that reduce-sums input "x" down to the shape of "like"; used as
// the gradient of broadcast-style ops.
class ReduceSumLikeOp final : public Operator {
 public:
  OF_DISALLOW_COPY_AND_MOVE(ReduceSumLikeOp);
  ReduceSumLikeOp() = default;
  ~ReduceSumLikeOp() = default;

  void InitFromOpConf() override;
  const PbMessage& GetCustomizedConf() const override;
  Maybe<void> InferBlobDescs(std::function<BlobDesc*(const std::string&)> GetBlobDesc4BnInOp,
                             const ParallelContext* parallel_ctx) const override;

 private:
  Maybe<void> InferBatchAxis(
      std::function<OptInt64*(const std::string&)> BatchAxis4BnInOp) const override;
  Maybe<void> GetSbpSignatures(
      const std::function<Maybe<const BlobDesc*>(const std::string&)>& LogicalBlobDesc4Ibn,
      SbpSignatureList* sbp_sig_list) const override;
};
} // namespace oneflow
#endif // ONEFLOW_CORE_OPERATOR_REDUCE_SUM_LIKE_OP_H_
#include "oneflow/core/operator/reduce_sum_op.h"
#include "oneflow/core/kernel/kernel_util.h"
#include "oneflow/core/operator/reduce_sbp_util.h"
#include "oneflow/core/job/sbp_signature_builder.h"
namespace oneflow {
// Declares blobs: input "in", output "out", and forward scratch "fw_tmp"
// used by the reduction kernel.
void ReduceSumOp::InitFromOpConf() {
  CHECK(op_conf().has_reduce_sum_conf());
  EnrollInputBn("in");
  EnrollOutputBn("out");
  EnrollTmpBn("fw_tmp");
}
// Returns this op's sub-message of the OperatorConf oneof.
const PbMessage& ReduceSumOp::GetCustomizedConf() const {
  const ReduceSumOpConf& customized = op_conf().reduce_sum_conf();
  return customized;
}
// Infers "out" from "in": same dtype, shape reduced over conf.axis() (the
// reduced dims are kept as 1 or dropped per keep_dims). "fw_tmp" mirrors
// "in" as reduction scratch.
Maybe<void> ReduceSumOp::InferBlobDescs(
    std::function<BlobDesc*(const std::string&)> GetBlobDesc4BnInOp, const ParallelContext*) const {
  const ReduceSumOpConf& conf = op_conf().reduce_sum_conf();
  CHECK_OR_RETURN(!conf.axis().empty());
  const BlobDesc* in_blob = GetBlobDesc4BnInOp("in");
  *GetBlobDesc4BnInOp("fw_tmp") = *in_blob;
  BlobDesc* out_blob = GetBlobDesc4BnInOp("out");
  out_blob->set_data_type(in_blob->data_type());
  const AxisVector axis_vec = {conf.axis().begin(), conf.axis().end()};
  // reduced_shape keeps the reduced axes as size-1 dims.
  const Shape& reduced_shape = CreateReducedShape(in_blob->shape(), axis_vec);
  if (conf.keep_dims()) {
    out_blob->mut_shape() = reduced_shape;
  } else {
    out_blob->mut_shape() = reduced_shape.RemoveOnes(axis_vec);
  }
  return Maybe<void>::Ok();
}
// Propagates the batch axis from "in" to "out". The axis survives only when
// keep_dims is set or the axis itself is not reduced away.
// NOTE(review): when keep_dims is false and axes before the batch axis are
// dropped, the axis index is copied unshifted — confirm callers expect that.
Maybe<void> ReduceSumOp::InferBatchAxis(
    std::function<OptInt64*(const std::string&)> BatchAxis4BnInOp) const {
  const ReduceSumOpConf& conf = op_conf().reduce_sum_conf();
  bool keep_dims = conf.keep_dims();
  HashSet<int64_t> conf_axes = {conf.axis().begin(), conf.axis().end()};
  const OptInt64* in_batch_axis = BatchAxis4BnInOp("in");
  OptInt64* out_batch_axis = BatchAxis4BnInOp("out");
  if (in_batch_axis->has_value()) {
    if (keep_dims || conf_axes.find(in_batch_axis->value()) == conf_axes.end()) {
      *out_batch_axis = *in_batch_axis;
    } else {
      // The batch axis is reduced away: the output has no batch axis.
      out_batch_axis->clear_value();
    }
  } else {
    out_batch_axis->clear_value();
  }
  return Maybe<void>::Ok();
}
// Enumerates one SBP signature per input axis:
//  - reduced axis i: split input yields partial sums, so output is
//    partial-sum;
//  - kept axis i: split both; when keep_dims is false the output split axis
//    is shifted left by the number of reduced axes seen so far.
Maybe<void> ReduceSumOp::GetSbpSignatures(
    const std::function<Maybe<const BlobDesc*>(const std::string&)>& LogicalBlobDesc4Ibn,
    SbpSignatureList* sbp_sig_list) const {
  int64_t num_axes = JUST(LogicalBlobDesc4Ibn("in"))->shape().NumAxes();
  auto IsReducedAxis =
      ReduceSbpUtil::MakePredicatorIsReducedAxis(op_conf().reduce_sum_conf().axis(), num_axes);
  int64_t num_reduced_axes = 0;
  FOR_RANGE(int64_t, i, 0, num_axes) {
    if (IsReducedAxis(i)) {
      SbpSignatureBuilder()
          .Split(input_bns(), i)
          .PartialSum(output_bns())
          .Build(sbp_sig_list->mutable_sbp_signature()->Add());
      num_reduced_axes += 1;
    } else {
      SbpSignatureBuilder()
          .Split(input_bns(), i)
          .Split(output_bns(), op_conf().reduce_sum_conf().keep_dims() ? i : i - num_reduced_axes)
          .Build(sbp_sig_list->mutable_sbp_signature()->Add());
    }
  }
  return Maybe<void>::Ok();
}
REGISTER_OP(OperatorConf::kReduceSumConf, ReduceSumOp);
} // namespace oneflow
#ifndef ONEFLOW_CORE_OPERATOR_REDUCE_SUM_OP_H_
#define ONEFLOW_CORE_OPERATOR_REDUCE_SUM_OP_H_
#include "oneflow/core/operator/operator.h"
namespace oneflow {
// System op that sums its input over a configured set of axes, optionally
// keeping the reduced dimensions as size 1.
class ReduceSumOp final : public Operator {
 public:
  OF_DISALLOW_COPY_AND_MOVE(ReduceSumOp);
  ReduceSumOp() = default;
  ~ReduceSumOp() = default;

  void InitFromOpConf() override;
  const PbMessage& GetCustomizedConf() const override;
  Maybe<void> InferBlobDescs(std::function<BlobDesc*(const std::string&)> GetBlobDesc4BnInOp,
                             const ParallelContext* parallel_ctx) const override;

 private:
  Maybe<void> InferBatchAxis(
      std::function<OptInt64*(const std::string&)> BatchAxis4BnInOp) const override;
  Maybe<void> GetSbpSignatures(
      const std::function<Maybe<const BlobDesc*>(const std::string&)>& LogicalBlobDesc4Ibn,
      SbpSignatureList* sbp_sig_list) const override;
};
} // namespace oneflow
#endif // ONEFLOW_CORE_OPERATOR_REDUCE_SUM_OP_H_
#include "oneflow/core/operator/operator.h"
#include "oneflow/core/job/sbp_signature_builder.h"
#include "oneflow/core/operator/reshape_op_util.h"
namespace oneflow {
// System op that reshapes input "x" to the shape of a "like" blob; element
// counts must agree. Declared locally in this .cpp (no public header).
class ReshapeLikeOp final : public Operator {
 public:
  OF_DISALLOW_COPY_AND_MOVE(ReshapeLikeOp);
  ReshapeLikeOp() = default;
  ~ReshapeLikeOp() override = default;

  void InitFromOpConf() override;
  const PbMessage& GetCustomizedConf() const override;
  Maybe<void> InferBlobDescs(std::function<BlobDesc*(const std::string&)> GetBlobDesc4BnInOp,
                             const ParallelContext* parallel_ctx) const override;

 private:
  // Batch axis follows the framework's default propagation rule.
  Maybe<void> InferBatchAxis(
      std::function<OptInt64*(const std::string&)> BatchAxis4BnInOp) const override {
    return NaiveInferBatchAxis(BatchAxis4BnInOp);
  }
  Maybe<void> GetSbpSignatures(
      const std::function<Maybe<const BlobDesc*>(const std::string&)>& LogicalBlobDesc4Ibn,
      const ParallelDesc& parallel_desc, SbpSignatureList* sbp_sig_list) const override;
};
// Declares blobs: data input "x", output "y", and the shape-only "like"
// input (header-only: only its meta is consumed).
void ReshapeLikeOp::InitFromOpConf() {
  CHECK(op_conf().has_reshape_like_conf());
  EnrollInputBn("x");
  EnrollOutputBn("y");
  EnrollInputBn("like", false)->set_use_header_only(true);
}
// Returns this op's sub-message of the OperatorConf oneof.
const PbMessage& ReshapeLikeOp::GetCustomizedConf() const {
  const ReshapeLikeOpConf& customized = op_conf().reshape_like_conf();
  return customized;
}
// A reshape is only valid when element counts agree; the output then copies
// the "like" blob's meta wholesale.
Maybe<void> ReshapeLikeOp::InferBlobDescs(
    std::function<BlobDesc*(const std::string&)> GetBlobDesc4BnInOp,
    const ParallelContext* parallel_ctx) const {
  const BlobDesc* x_desc = GetBlobDesc4BnInOp("x");
  const BlobDesc* like_desc = GetBlobDesc4BnInOp("like");
  CHECK_EQ_OR_RETURN(x_desc->shape().elem_cnt(), like_desc->shape().elem_cnt());
  GetBlobDesc4BnInOp("y")->CopyMetaFrom(*like_desc);
  return Maybe<void>::Ok();
}
// SBP signatures: partial-sum and broadcast can flow through a reshape in
// either direction ("like" participates only by meta), plus the split
// signatures shared with plain reshape, derived from the two shapes.
Maybe<void> ReshapeLikeOp::GetSbpSignatures(
    const std::function<Maybe<const BlobDesc*>(const std::string&)>& LogicalBlobDesc4Ibn,
    const ParallelDesc& parallel_desc, SbpSignatureList* sbp_sig_list) const {
  const auto& x_shape = JUST(LogicalBlobDesc4Ibn("x"))->shape();
  const auto& like_shape = JUST(LogicalBlobDesc4Ibn("like"))->shape();
  SbpSignatureBuilder().PartialSum("like").Broadcast("x").Broadcast("y").Build(
      sbp_sig_list->mutable_sbp_signature()->Add());
  SbpSignatureBuilder().Broadcast("like").PartialSum("x").PartialSum("y").Build(
      sbp_sig_list->mutable_sbp_signature()->Add());
  return ReshapeOpUtil::GetReshapeSbpSignatures(
      x_shape, like_shape, StdVec2PbRpf<std::string>({"x"}),
      StdVec2PbRpf<std::string>({"like", "y"}), parallel_desc.parallel_num(), sbp_sig_list);
}
REGISTER_OP(OperatorConf::kReshapeLikeConf, ReshapeLikeOp);
} // namespace oneflow
#include "oneflow/core/operator/reshape_op.h"
#include "oneflow/core/common/balanced_splitter.h"
#include "oneflow/core/job/sbp_signature_builder.h"
#include "oneflow/core/operator/reshape_op_util.h"
namespace oneflow {
// Declares blobs: input "in" and output "out"; the output may const-inplace
// reuse the input buffer, since reshape never moves data.
void ReshapeOp::InitFromOpConf() {
  CHECK(op_conf().has_reshape_conf());
  EnrollInputBn("in");
  EnrollOutputBn("out")->set_const_inplace_ibn("in");
}
// Returns this op's sub-message of the OperatorConf oneof.
const PbMessage& ReshapeOp::GetCustomizedConf() const {
  const ReshapeOpConf& customized = op_conf().reshape_conf();
  return customized;
}
// Infers "out" from "in" and the configured target shape. The per-rank shape
// accounts for a split SBP parallel: the split dimension is narrowed to this
// rank's slice via BalancedSplitter. Only static (non-dynamic) inputs are
// supported by the system reshape.
Maybe<void> ReshapeOp::InferBlobDescs(
    std::function<BlobDesc*(const std::string&)> GetBlobDesc4BnInOp,
    const ParallelContext* parallel_ctx, const SbpSignature* sbp_signature) const {
  const BlobDesc* in_blob_desc = GetBlobDesc4BnInOp("in");
  BlobDesc* out_blob_desc = GetBlobDesc4BnInOp("out");
  CHECK_OR_RETURN(in_blob_desc->is_dynamic() == false);
  *out_blob_desc = *in_blob_desc;
  const ReshapeOpConf& conf = op_conf().reshape_conf();
  CHECK_GE_OR_RETURN(conf.shape().dim_size(), 1);
  DimVector dim_vec = {conf.shape().dim().begin(), conf.shape().dim().end()};
  // The system reshape requires fully specified positive dims (no -1 here).
  for (int32_t i = 0; i < dim_vec.size(); ++i) { CHECK_GT_OR_RETURN(dim_vec[i], 0); }
  const auto& sbp_parallel_it = sbp_signature->bn_in_op2sbp_parallel().find("out");
  CHECK_OR_RETURN(sbp_parallel_it != sbp_signature->bn_in_op2sbp_parallel().end());
  const SbpParallel& sbp_parallel = sbp_parallel_it->second;
  if (sbp_parallel.has_split_parallel()) {
    const int64_t split_axis = sbp_parallel.split_parallel().axis();
    const int64_t split_dim = conf.shape().dim().Get(split_axis);
    // Fix: validate the dimension is splittable BEFORE constructing the
    // splitter, so an invalid conf fails this check rather than whatever the
    // splitter does with an undersized dimension.
    CHECK_GE_OR_RETURN(split_dim, parallel_ctx->parallel_num());
    BalancedSplitter splitter(split_dim, parallel_ctx->parallel_num());
    dim_vec[split_axis] = splitter.At(parallel_ctx->parallel_id()).size();
  }
  out_blob_desc->mut_shape() = Shape(dim_vec);
  // Reshape must preserve the (per-rank) element count.
  CHECK_EQ_OR_RETURN(out_blob_desc->shape().elem_cnt(), in_blob_desc->shape().elem_cnt());
  return Maybe<void>::Ok();
}
// Derives the logical output shape from the conf, then delegates to
// ReshapeOpUtil to enumerate the split signatures compatible with both the
// input and output shapes.
Maybe<void> ReshapeOp::GetSbpSignatures(
    const std::function<Maybe<const BlobDesc*>(const std::string&)>& LogicalBlobDesc4Ibn,
    const ParallelDesc& parallel_desc, SbpSignatureList* sbp_sig_list) const {
  const auto& in_shape = JUST(LogicalBlobDesc4Ibn("in"))->shape();
  const auto& out_shape =
      JUST(ReshapeOpUtil::GetLogicalOutBlobShape(in_shape, op_conf().reshape_conf().shape()));
  return ReshapeOpUtil::GetReshapeSbpSignatures(in_shape, *out_shape, input_bns(), output_bns(),
                                                parallel_desc.parallel_num(), sbp_sig_list);
}
REGISTER_OP(OperatorConf::kReshapeConf, ReshapeOp);
} // namespace oneflow
#ifndef ONEFLOW_CORE_OPERATOR_RESHAPE_OP_H_
#define ONEFLOW_CORE_OPERATOR_RESHAPE_OP_H_
#include "oneflow/core/operator/operator.h"
namespace oneflow {
// System op that reinterprets its static-shaped input with a new shape of
// equal element count.
class ReshapeOp final : public Operator {
 public:
  OF_DISALLOW_COPY_AND_MOVE(ReshapeOp);
  ReshapeOp() = default;
  ~ReshapeOp() = default;

  void InitFromOpConf() override;
  const PbMessage& GetCustomizedConf() const override;
  // Takes the SBP signature so the per-rank split dimension can be narrowed.
  Maybe<void> InferBlobDescs(std::function<BlobDesc*(const std::string&)> GetBlobDesc4BnInOp,
                             const ParallelContext* parallel_ctx,
                             const SbpSignature* sbp_signature) const override;

 private:
  // Batch axis follows the framework's default propagation rule.
  Maybe<void> InferBatchAxis(
      std::function<OptInt64*(const std::string&)> BatchAxis4BnInOp) const override {
    return NaiveInferBatchAxis(BatchAxis4BnInOp);
  }
  Maybe<void> GetSbpSignatures(
      const std::function<Maybe<const BlobDesc*>(const std::string&)>& LogicalBlobDesc4Ibn,
      const ParallelDesc& parallel_desc, SbpSignatureList* sbp_sig_list) const override;
};
} // namespace oneflow
#endif // ONEFLOW_CORE_OPERATOR_RESHAPE_OP_H_
from __future__ import absolute_import
import operator
from functools import reduce
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.interpret_util as interpret_util
import oneflow.python.framework.distribute as distribute_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.remote_blob as remote_blob_util
from oneflow.python.oneflow_export import oneflow_export
@oneflow_export("experimental.broadcast_maximum")
def broadcast_maximum(a, b, name=None):
    """Builds a system BroadcastMaximum op taking `a` and `b` and returns the
    remote blob of its "out" output. `name` defaults to a unique name."""
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name if name is not None else id_util.UniqueStr("BroadcastMaximum_")
    conf = op_conf.broadcast_maximum_conf
    conf.a = a.unique_name
    conf.b = b.unique_name
    conf.out = "out"
    interpret_util.Forward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
from __future__ import absolute_import
import operator
from functools import reduce
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.interpret_util as interpret_util
import oneflow.python.framework.distribute as distribute_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.remote_blob as remote_blob_util
from oneflow.python.oneflow_export import oneflow_export
@oneflow_export("experimental.broadcast_minimum")
def broadcast_minimum(a, b, name=None):
    """Builds a system BroadcastMinimum op taking `a` and `b` and returns the
    remote blob of its "out" output. `name` defaults to a unique name."""
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name if name is not None else id_util.UniqueStr("BroadcastMinimum_")
    conf = op_conf.broadcast_minimum_conf
    conf.a = a.unique_name
    conf.b = b.unique_name
    conf.out = "out"
    interpret_util.Forward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
......@@ -102,7 +102,7 @@ def reshape(x, shape, name=None):
shape = list(shape)
assert all(dim == -1 or dim > 0 for dim in shape)
assert shape.count(-1) <= 1
if (not x.is_dynamic) and (os.getenv("ENABLE_USER_OP") != "False"):
if not x.is_dynamic:
if name is None:
name = id_util.UniqueStr("Reshape_")
return (
......@@ -117,20 +117,14 @@ def reshape(x, shape, name=None):
)
else:
op_conf = op_conf_util.OperatorConf()
if x.is_dynamic:
setattr(
op_conf,
"name",
name if name is not None else id_util.UniqueStr("DynamicReshape_"),
)
setattr(op_conf.dynamic_reshape_conf, "in", x.unique_name)
op_conf.dynamic_reshape_conf.shape.dim.extend(list(shape))
setattr(op_conf.dynamic_reshape_conf, "out", "out")
else:
op_conf.name = id_util.UniqueStr("Reshape_" + x.op_name)
setattr(op_conf.reshape_conf, "in", x.unique_name)
op_conf.reshape_conf.shape.dim[:] = list(infer_shape(x, shape))
op_conf.reshape_conf.out = "out"
setattr(
op_conf,
"name",
name if name is not None else id_util.UniqueStr("DynamicReshape_"),
)
setattr(op_conf.dynamic_reshape_conf, "in", x.unique_name)
op_conf.dynamic_reshape_conf.shape.dim.extend(list(shape))
setattr(op_conf.dynamic_reshape_conf, "out", "out")
interpret_util.Forward(op_conf)
lbi = logical_blob_id_util.LogicalBlobId()
lbi.op_name = op_conf.name
......@@ -140,30 +134,18 @@ def reshape(x, shape, name=None):
@oneflow_export("reshape_like")
def reshape_like(x, like, name=None):
if os.getenv("ENABLE_USER_OP") != "False":
if name is None:
name = id_util.UniqueStr("ReshapeLike_")
return (
flow.user_op_builder(name)
.Op("reshape_like")
.Input("in", [x])
.Input("like", [like])
.Output("out")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
else:
op_conf = op_conf_util.OperatorConf()
op_conf.name = id_util.UniqueStr("ReshapeLike_")
setattr(op_conf.reshape_like_conf, "x", x.unique_name)
setattr(op_conf.reshape_like_conf, "like", like.unique_name)
op_conf.reshape_like_conf.y = "y"
interpret_util.Forward(op_conf)
lbi = logical_blob_id_util.LogicalBlobId()
lbi.op_name = op_conf.name
lbi.blob_name = "y"
return remote_blob_util.RemoteBlob(lbi)
if name is None:
name = id_util.UniqueStr("ReshapeLike_")
return (
flow.user_op_builder(name)
.Op("reshape_like")
.Input("in", [x])
.Input("like", [like])
.Output("out")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("dynamic_reshape")
......
......@@ -66,29 +66,16 @@ def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
if len(axis) == 0:
return input_tensor
if os.getenv("ENABLE_USER_OP") != "False":
op = (
flow.user_op_builder(name)
.Op("reduce_sum")
.Input("input_tensor", [input_tensor])
.Output("output_tensor")
.Attr("axis", axis, "AttrTypeListInt32")
.Attr("keepdims", keepdims, "AttrTypeBool")
.Build()
)
return op.InferAndTryRun().SoleOutputBlob()
else:
op_conf = op_conf_util.OperatorConf()
setattr(op_conf, "name", name)
setattr(op_conf.reduce_sum_conf, "in", input_tensor.unique_name)
setattr(op_conf.reduce_sum_conf, "out", "out")
op_conf.reduce_sum_conf.axis[:] = list(axis)
setattr(op_conf.reduce_sum_conf, "keep_dims", keepdims)
interpret_util.Forward(op_conf)
lbi = logical_blob_id_util.LogicalBlobId()
lbi.op_name = op_conf.name
lbi.blob_name = "out"
return remote_blob_util.RemoteBlob(lbi)
op = (
flow.user_op_builder(name)
.Op("reduce_sum")
.Input("input_tensor", [input_tensor])
.Output("output_tensor")
.Attr("axis", axis, "AttrTypeListInt32")
.Attr("keepdims", keepdims, "AttrTypeBool")
.Build()
)
return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export("math.reduce_any")
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册