[codemod][caffe2] Run clang-format - 5/7
Summary:
This directory is opted in to clang-format but is not format-clean. This blocks continuous formatting from being enabled on fbcode and causes hassle for other codemods that leave inconsistent formatting. This diff runs clang-format, which is widely used and considered safe.

If you are unhappy with the formatting of a particular block, please *accept this diff* and then, in a stacked commit, undo the change and wrap that code in `// clang-format off` and `// clang-format on`, or `/* clang-format off */` and `/* clang-format on */`.
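For reference, a minimal sketch of that directive pair (illustrative code, not part of this diff); clang-format leaves everything between the two markers untouched:

// clang-format off
static const int kLookupTable[] = {
    1,   2,   4,
    8,  16,  32,  // hand-aligned columns that clang-format would otherwise repack
};
// clang-format on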

drop-conflicts

Test Plan: sandcastleit

Reviewed By: jerryzh168

Differential Revision: D22311706

fbshipit-source-id: 1ca59a82e96156a4a5dfad70ba3e64d44c5e762a
iahs authored and facebook-github-bot committed Jun 30, 2020
1 parent 29aef8f commit 0ddaaf6
Showing 50 changed files with 341 additions and 257 deletions.
20 changes: 12 additions & 8 deletions caffe2/mpi/mpi_common.h
@@ -15,12 +15,16 @@ inline void CheckInitializedMPI() {
CAFFE_ENFORCE(flag, "MPI does not seem to have been initialized.");
}

template <typename T> class MPIDataTypeWrapper;

#define MPI_DATATYPE_WRAPPER(c_type, mpi_type) \
template<> class MPIDataTypeWrapper<c_type> { \
public: \
inline static MPI_Datatype type() { return mpi_type; } \
template <typename T>
class MPIDataTypeWrapper;

#define MPI_DATATYPE_WRAPPER(c_type, mpi_type) \
template <> \
class MPIDataTypeWrapper<c_type> { \
public: \
inline static MPI_Datatype type() { \
return mpi_type; \
} \
};

MPI_DATATYPE_WRAPPER(char, MPI_CHAR)
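As an illustration of how this trait is consumed (a hedged sketch, not code from this commit — it assumes a float wrapper instantiated by the same macro, e.g. MPI_DATATYPE_WRAPPER(float, MPI_FLOAT)):

#include <mpi.h>
#include <vector>
#include "caffe2/mpi/mpi_common.h"

// Broadcasts a typed buffer; MPIDataTypeWrapper<T>::type() supplies the
// MPI datatype tag matching the element type at compile time.
template <typename T>
void BroadcastBuffer(std::vector<T>& buf, int root, MPI_Comm comm) {
  MPI_Bcast(
      buf.data(),
      static_cast<int>(buf.size()),
      caffe2::MPIDataTypeWrapper<T>::type(),
      root,
      comm);
}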
@@ -150,6 +154,6 @@ void MPISetupPeers(
const int replicas,
const string& role,
const string& job_path);
} // namespace caffe2

#endif // CAFFE2_MPI_MPI_COMMON_H_
4 changes: 2 additions & 2 deletions caffe2/mpi/mpi_ops.h
@@ -243,6 +243,6 @@ class MPIReceiveTensorOp final : public Operator<Context> {
OUTPUT_TAGS(OUTPUT, SRC_OUT, TAG_OUT);
};

} // namespace caffe2

#endif // CAFFE2_MPI_MPI_OPS_H_
10 changes: 6 additions & 4 deletions caffe2/observers/runcnt_observer.h
@@ -9,7 +9,8 @@ namespace caffe2 {

class RunCountNetObserver;

class CAFFE2_API RunCountOperatorObserver final : public ObserverBase<OperatorBase> {
class CAFFE2_API RunCountOperatorObserver final
: public ObserverBase<OperatorBase> {
public:
explicit RunCountOperatorObserver(OperatorBase* op) = delete;
RunCountOperatorObserver(OperatorBase* op, RunCountNetObserver* netObserver);
@@ -26,9 +27,10 @@ class CAFFE2_API RunCountOperatorObserver final : public ObserverBase<OperatorBase> {
RunCountNetObserver* netObserver_;
};

class CAFFE2_API RunCountNetObserver final : public OperatorAttachingNetObserver<
RunCountOperatorObserver,
RunCountNetObserver> {
class CAFFE2_API RunCountNetObserver final
: public OperatorAttachingNetObserver<
RunCountOperatorObserver,
RunCountNetObserver> {
public:
explicit RunCountNetObserver(NetBase* subject_)
: OperatorAttachingNetObserver<
5 changes: 3 additions & 2 deletions caffe2/observers/time_observer.h
@@ -28,8 +28,9 @@ class CAFFE2_API TimeCounter {
int iterations_ = 0;
};

class CAFFE2_API TimeOperatorObserver final : public TimeCounter,
public ObserverBase<OperatorBase> {
class CAFFE2_API TimeOperatorObserver final
: public TimeCounter,
public ObserverBase<OperatorBase> {
public:
explicit TimeOperatorObserver(OperatorBase* subject) = delete;
explicit TimeOperatorObserver(
37 changes: 22 additions & 15 deletions caffe2/onnx/backend.h
@@ -113,8 +113,8 @@ ::google::protobuf::RepeatedField<::google::protobuf::int64>
OnnxAttributes::get(const std::string& key) const;

template <>
::google::protobuf::RepeatedField<float>
OnnxAttributes::get(const std::string& key) const;
::google::protobuf::RepeatedField<float> OnnxAttributes::get(
const std::string& key) const;

template <>
const TensorProto* OnnxAttributes::get(const std::string& key) const;
@@ -137,7 +137,7 @@ class CAFFE2_API Caffe2Backend {
// from releasing the object
Caffe2Backend(DummyName* dummy = nullptr) {
if (dummy) {
dummy_ = std::shared_ptr<DummyName>(dummy, [](DummyName *){});
dummy_ = std::shared_ptr<DummyName>(dummy, [](DummyName*) {});
} else {
dummy_ = std::make_shared<DummyName>();
}
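The lambda deleter being reformatted above is the standard non-owning shared_ptr idiom; a self-contained sketch of the pattern (hypothetical names, not caffe2 code):

#include <memory>

struct Widget {};

// Wraps a caller-owned pointer in a shared_ptr whose deleter is a no-op,
// so destroying the shared_ptr never frees the caller's object.
std::shared_ptr<Widget> NonOwningView(Widget* borrowed) {
  return std::shared_ptr<Widget>(borrowed, [](Widget*) {});
}

int main() {
  Widget owned;                       // lifetime controlled by the caller
  auto view = NonOwningView(&owned);  // usable where shared_ptr is required
}                                     // view goes away; owned is not deleted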
@@ -173,7 +173,9 @@ class CAFFE2_API Caffe2Backend {
bool include_initializers,
const std::vector<Caffe2Ops>& extras);

void CheckOpSchemaArguments(const caffe2::OpSchema& schema, const caffe2::OperatorDef& op);
void CheckOpSchemaArguments(
const caffe2::OpSchema& schema,
const caffe2::OperatorDef& op);

Caffe2Ops OnnxNodeToCaffe2Ops(
const ModelProto& init_model,
@@ -217,16 +219,19 @@ class CAFFE2_API Caffe2Backend {

Caffe2Ops CreateSlice(OnnxNode* onnx_node, const ConversionContext& ctx);

std::string PreprocessSliceIndexTensor(OnnxNode* onnx_node,
Caffe2Ops& ret,
std::string indices_tensor,
std::string axes_tensor,
std::string rank_tensor,
std::string zero_tensor,
std::string one_tensor,
int default_value);

Caffe2Ops CreateDynamicSlice(OnnxNode* onnx_node, const ConversionContext& ctx);
std::string PreprocessSliceIndexTensor(
OnnxNode* onnx_node,
Caffe2Ops& ret,
std::string indices_tensor,
std::string axes_tensor,
std::string rank_tensor,
std::string zero_tensor,
std::string one_tensor,
int default_value);

Caffe2Ops CreateDynamicSlice(
OnnxNode* onnx_node,
const ConversionContext& ctx);

Caffe2Ops CreateSplit(OnnxNode* onnx_node, const ConversionContext& ctx);

@@ -240,7 +245,9 @@ class CAFFE2_API Caffe2Backend {

Caffe2Ops CreateNonZeroOp(OnnxNode* onnx_node, const ConversionContext& ctx);

Caffe2Ops CreateMultinomialOp(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateMultinomialOp(
OnnxNode* onnx_node,
const ConversionContext& ctx);

Caffe2Ops CreateBatchNormalization(
OnnxNode* onnx_node,
6 changes: 4 additions & 2 deletions caffe2/onnx/backend_rep.h
@@ -7,7 +7,8 @@
#include <string>
#include <vector>

namespace caffe2 { namespace onnx {
namespace caffe2 {
namespace onnx {
class CAFFE2_API Caffe2BackendRep {
public:
void Run(
@@ -45,4 +46,5 @@ class CAFFE2_API Caffe2BackendRep {
std::vector<std::string> uninitialized_inputs_;
std::unique_ptr<caffe2::Predictor> predictor_{nullptr};
};
}}
} // namespace onnx
} // namespace caffe2
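Side note on the formatting: clang-format splits the fused namespace openings onto separate lines and labels each closing brace. Under C++17 the same nesting can be written more compactly (general C++, not a change made in this diff):

// C++17 nested namespace definition, equivalent to the split form above.
namespace caffe2::onnx {
class Example {};
} // namespace caffe2::onnx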
13 changes: 8 additions & 5 deletions caffe2/onnx/device.h
@@ -3,21 +3,24 @@
#include <functional>
#include <string>

namespace caffe2 { namespace onnx {
namespace caffe2 {
namespace onnx {

enum class DeviceType {CPU=0, CUDA=1};
enum class DeviceType { CPU = 0, CUDA = 1 };

struct Device {
Device(const std::string& spec);
DeviceType type;
int device_id{-1};
};

}}
} // namespace onnx
} // namespace caffe2

namespace std {
template <> struct hash<caffe2::onnx::DeviceType> {
std::size_t operator()(const caffe2::onnx::DeviceType &k) const {
template <>
struct hash<caffe2::onnx::DeviceType> {
std::size_t operator()(const caffe2::onnx::DeviceType& k) const {
return std::hash<int>()(static_cast<int>(k));
}
};
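A hedged usage sketch for the specialization above (illustrative, not from this commit): it lets DeviceType key the standard unordered containers. C++14 and later define std::hash for enum types out of the box, so a hand-written specialization like this mainly serves older toolchains.

#include <string>
#include <unordered_map>
// Assumes caffe2/onnx/device.h (the header shown above) is included.

// The hash specialization above makes DeviceType directly usable as a key.
std::unordered_map<caffe2::onnx::DeviceType, std::string> device_names = {
    {caffe2::onnx::DeviceType::CPU, "cpu"},
    {caffe2::onnx::DeviceType::CUDA, "cuda"},
};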
1 change: 1 addition & 0 deletions caffe2/onnx/onnx_exporter.h
@@ -52,6 +52,7 @@ class CAFFE2_API OnnxExporter {
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);

void InitOpToTensorProto(const caffe2::OperatorDef& def, TensorProto* tensor);

private:
ConvertedResult CommonCaffe2OpToOnnxNodes(const caffe2::OperatorDef& def);

2 changes: 1 addition & 1 deletion caffe2/operators/accuracy_op.h
@@ -18,7 +18,7 @@ class AccuracyOp final : public Operator<Context> {
bool RunOnDevice() override;

protected:
int top_k_;
INPUT_TAGS(PREDICTION, LABEL);
};

3 changes: 1 addition & 2 deletions caffe2/operators/affine_channel_op.h
Original file line number Diff line number Diff line change
@@ -71,8 +71,7 @@ class AffineChannelOp final : public Operator<Context> {
const int N = X.dim32(0);
const int C = X.dim32(ndim - 1);
const int HxW = X.numel() / (N * C);
auto* Y =
Output(0, X.sizes(), at::dtype<T>());
auto* Y = Output(0, X.sizes(), at::dtype<T>());
math::AffineChannel<T, Context, StorageOrder::NHWC>(
N,
C,
2 changes: 1 addition & 1 deletion caffe2/operators/bbox_transform_op.h
@@ -3,8 +3,8 @@
#ifndef BBOX_TRANSFORM_OP_H_
#define BBOX_TRANSFORM_OP_H_

#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
2 changes: 1 addition & 1 deletion caffe2/operators/box_with_nms_limit_op.h
@@ -3,8 +3,8 @@
#ifndef BOX_WITH_NMS_AND_LIMIT_OP_H_
#define BOX_WITH_NMS_AND_LIMIT_OP_H_

#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/operator.h"

C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(BoxWithNMSLimit)
6 changes: 3 additions & 3 deletions caffe2/operators/cast_op.h
@@ -1,12 +1,12 @@
#pragma once

#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/cast.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/types.h"

namespace caffe2 {

@@ -52,4 +52,4 @@ class CastOp : public Operator<Context> {
bool (CastOp::*body_)();
};

} // namespace caffe2
2 changes: 1 addition & 1 deletion caffe2/operators/collect_and_distribute_fpn_rpn_proposals_op.h
@@ -1,8 +1,8 @@
#ifndef CAFFE2_OPERATORS_COLLECT_AND_DISTRIBUTE_FPN_RPN_PROPOSALS_OP_H_
#define CAFFE2_OPERATORS_COLLECT_AND_DISTRIBUTE_FPN_RPN_PROPOSALS_OP_H_

#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
2 changes: 1 addition & 1 deletion caffe2/operators/conditional_op.h
@@ -19,6 +19,6 @@ class ConditionalOp final : public Operator<Context> {
bool RunOnDevice() override;
};

} // caffe2
} // namespace caffe2

#endif
8 changes: 5 additions & 3 deletions caffe2/operators/conv_transpose_op_mobile.h
@@ -17,7 +17,9 @@ class ConvTransposeMobileOp final : public ConvTransposeUnpoolBase<Context> {
USE_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS(Context);
ConvTransposeMobileOp(const OperatorDef& operator_def, Workspace* ws)
: ConvTransposeUnpoolBase<Context>(operator_def, ws) {
OPERATOR_NEEDS_FEATURE(order_ == StorageOrder::NCHW, "Only NCHW order is supported right now.");
OPERATOR_NEEDS_FEATURE(
order_ == StorageOrder::NCHW,
"Only NCHW order is supported right now.");
OPERATOR_NEEDS_FEATURE(
this->pad_l() == 0, "operator does not handle row width padding");
OPERATOR_NEEDS_FEATURE(
@@ -29,8 +31,8 @@ class ConvTransposeMobileOp final : public ConvTransposeUnpoolBase<Context> {
bool RunOnDeviceWithOrderNHWC() override;

private:
// We store numThreads per-worker tiles of Y, and numThreads per-worker threadBuffer for the
// gemm output, laid out in that order.
// We store numThreads per-worker tiles of Y, and numThreads per-worker
// threadBuffer for the gemm output, laid out in that order.
Tensor threadBuffer_{CPU};

// Input: X, W, b
2 changes: 1 addition & 1 deletion caffe2/operators/create_scope_op.h
@@ -149,7 +149,7 @@ class CAFFE2_API WorkspaceStack {
int top_;
std::vector<std::shared_ptr<Workspace>> workspaces_;
};
}
} // namespace detail

template <class Context>
class CreateScopeOp final : public Operator<Context> {
25 changes: 19 additions & 6 deletions caffe2/operators/deform_conv_op_impl.h
@@ -136,7 +136,22 @@ bool DeformConvOp<T, Context>::RunOnDeviceWithOrderNCHW() {
bias_data = Input(BIAS).template data<T>();
}

auto f = [this, &filter_offset, &bias_data, &X, &buffer_shape, &N, &Xdata, &offset_data, &M, &filter, &output_image_size, &kernel_dim, &Ydata, &input_offset, &offset_offset, &output_offset] (Tensor* col_buffer) {
auto f = [this,
&filter_offset,
&bias_data,
&X,
&buffer_shape,
&N,
&Xdata,
&offset_data,
&M,
&filter,
&output_image_size,
&kernel_dim,
&Ydata,
&input_offset,
&offset_offset,
&output_offset](Tensor* col_buffer) {
col_buffer->Resize(buffer_shape);
T* col_buffer_data = col_buffer->template mutable_data<T>();
// Im2col, followed by gemm.
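Background on that comment (context, not part of the diff): im2col unrolls every receptive field of the input into a column of a scratch matrix, so the convolution becomes a single dense gemm against the filter matrix. A minimal single-channel, stride-1, no-padding sketch (the real kernel presumably lives in caffe2's math utilities):

#include <vector>

std::vector<float> Im2Col(const std::vector<float>& img, int H, int W, int K) {
  const int outH = H - K + 1, outW = W - K + 1;
  std::vector<float> cols(K * K * outH * outW);
  for (int ky = 0; ky < K; ++ky)
    for (int kx = 0; kx < K; ++kx)
      for (int y = 0; y < outH; ++y)
        for (int x = 0; x < outW; ++x)
          // Row (ky, kx) of the column matrix holds the input pixel that
          // filter tap (ky, kx) sees at output position (y, x).
          cols[((ky * K + kx) * outH + y) * outW + x] =
              img[(y + ky) * W + (x + kx)];
  return cols;  // [K*K x outH*outW]; filters [M x K*K] * cols = [M x outH*outW]
}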
@@ -196,8 +211,7 @@ bool DeformConvGradientOp<T, Context>::RunOnDeviceWithOrderNCHW() {
auto& offset = Input(OFFSET);
auto& filter = Input(FILTER);
auto& dY = Input(OUTPUT_GRAD);
const int N = X.dim32(0), C = X.dim32(1);

const vector<int> input_dims = this->GetDims(X);
@@ -303,7 +317,6 @@ bool DeformConvGradientOp<T, Context>::RunOnDeviceWithOrderNCHW() {

T* dbias_data = nullptr;
if (!no_bias_) {

auto* dbias = Output(BIAS_OR_INPUT_GRAD, {M}, at::dtype<T>());
if (bias_multiplier_.numel() != output_image_size) {
// If the helper bias multiplier is not M, reshape and fill it with one.
@@ -323,8 +336,8 @@ bool DeformConvGradientOp<T, Context>::RunOnDeviceWithOrderNCHW() {

T* dXdata = nullptr;
if (OutputSize() == 4 || (no_bias_ && (OutputSize() == 3))) {

auto* dX = Output(no_bias_ ? BIAS_OR_INPUT_GRAD : INPUT_GRAD, X.sizes(), at::dtype<T>());
auto* dX = Output(
no_bias_ ? BIAS_OR_INPUT_GRAD : INPUT_GRAD, X.sizes(), at::dtype<T>());
dXdata = dX->template mutable_data<T>();
math::Set<T, Context>(dX->numel(), 0, dXdata, &context_);
}
2 changes: 1 addition & 1 deletion caffe2/operators/elementwise_linear_op.h
@@ -38,4 +38,4 @@ class ElementwiseLinearGradientOp final : public Operator<Context> {

} // namespace caffe2

#endif // CAFFE2_OPERATORS_ELEMENTWISE_LINEAR_OP_H_