use oss config for clang-format
Ailing Zhang authored and dlibenzi committed Jul 8, 2019
1 parent cab477c commit c0f348d
Showing 57 changed files with 410 additions and 298 deletions.
155 changes: 155 additions & 0 deletions .clang-format
@@ -0,0 +1,155 @@
---
Language: Cpp
# BasedOnStyle: Google
AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: true
#AllowAllArgumentsOnNextLine: true
#AllowAllConstructorInitializersOnNextLine: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: All
#AllowShortLambdasOnASingleLine: All
AllowShortIfStatementsOnASingleLine: true
#AllowShortIfStatementsOnASingleLine: WithoutElse
AllowShortLoopsOnASingleLine: true
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
#AfterCaseLabel: false
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
SplitEmptyFunction: true
SplitEmptyRecord: true
SplitEmptyNamespace: true
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeInheritanceComma: false
BreakInheritanceList: BeforeColon
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: true
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeBlocks: Regroup
IncludeCategories:
- Regex: '^<ext/.*\.h>'
Priority: 2
- Regex: '^<.*\.h>'
Priority: 1
- Regex: '^<.*'
Priority: 2
- Regex: '.*'
Priority: 3
IncludeIsMainRegex: '([-_](test|unittest))?$'
IndentCaseLabels: true
IndentPPDirectives: None
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Never
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
RawStringFormats:
- Language: Cpp
Delimiters:
- cc
- CC
- cpp
- Cpp
- CPP
- 'c++'
- 'C++'
CanonicalDelimiter: ''
BasedOnStyle: google
- Language: TextProto
Delimiters:
- pb
- PB
- proto
- PROTO
EnclosingFunctions:
- EqualsProto
- EquivToProto
- PARSE_PARTIAL_TEXT_PROTO
- PARSE_TEST_PROTO
- PARSE_TEXT_PROTO
- ParseTextOrDie
- ParseTextProtoOrDie
CanonicalDelimiter: ''
BasedOnStyle: google
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
#SpaceAfterLogicalNot: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Auto
#StatementMacros:
# - Q_UNUSED
# - QT_REQUIRE_VERSION
TabWidth: 8
UseTab: Never
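
For reference, here is a small hypothetical snippet (not part of this commit) showing how code comes out under the configuration above: 2-space indentation, attached braces, short ifs and loops kept on one line, left pointer alignment, and closing-namespace comments. Since clang-format picks up the nearest .clang-format file above the file being formatted, `clang-format -i` anywhere in the repository uses this configuration automatically.

```cpp
#include <vector>

// Illustrative only: the effect of key options from the new .clang-format
// (IndentWidth: 2, BreakBeforeBraces: Attach, FixNamespaceComments: true).
namespace demo {

class Accumulator {
 public:  // AccessModifierOffset: -1, so one space instead of two.
  explicit Accumulator(int* sink) : sink_(sink) {}

  void AddAll(const std::vector<int>& values) {
    for (int v : values) Add(v);  // AllowShortLoopsOnASingleLine: true.
  }

  void Add(int value) {
    if (sink_ != nullptr) *sink_ += value;  // Short if on a single line.
  }

 private:
  int* sink_;  // "int*", not "int *": PointerAlignment: Left.
};

}  // namespace demo
```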
1 change: 1 addition & 0 deletions torch_xla/csrc/batch_norm.cpp
@@ -1,4 +1,5 @@
#include "torch_xla/csrc/batch_norm.h"

#include "torch_xla/csrc/helpers.h"

namespace torch_xla {
1 change: 1 addition & 0 deletions torch_xla/csrc/cross_replica_reduces.cpp
@@ -1,6 +1,7 @@
#include "torch_xla/csrc/cross_replica_reduces.h"

#include <vector>

#include "torch_xla/csrc/helpers.h"

namespace torch_xla {
75 changes: 35 additions & 40 deletions torch_xla/csrc/init_python_bindings.cpp
@@ -242,52 +242,47 @@ void InitXlaModuleBindings(py::module m) {
m.def("_xla_set_default_device",
[](const std::string& device) { return SetCurrentDevice(device); });
m.def("_xla_get_default_device", []() { return GetCurrentDevice(); });
m.def(
"_xla_sync_multi",
[](const std::vector<at::Tensor>& tensors,
const std::vector<std::string>& devices, bool wait,
bool sync_xla_data) {
NoGilSection nogil;
SyncTensors(tensors, devices, wait, sync_xla_data);
},
py::arg("tensors"), py::arg("devices"), py::arg("wait") = true,
py::arg("sync_xla_data") = true);
m.def(
"_xla_sync_live_tensors",
[](const std::string& device, const std::vector<std::string>& devices,
bool wait) {
NoGilSection nogil;
SyncLiveTensors(device, devices, wait);
},
py::arg("device") = "", py::arg("devices"), py::arg("wait") = true);
m.def(
"_xla_step_marker",
[](const std::string& device, const std::vector<std::string>& devices,
bool wait) {
NoGilSection nogil;
StepMarker(device, devices, wait);
},
py::arg("device") = "", py::arg("devices"), py::arg("wait") = true);
m.def("_xla_sync_multi",
[](const std::vector<at::Tensor>& tensors,
const std::vector<std::string>& devices, bool wait,
bool sync_xla_data) {
NoGilSection nogil;
SyncTensors(tensors, devices, wait, sync_xla_data);
},
py::arg("tensors"), py::arg("devices"), py::arg("wait") = true,
py::arg("sync_xla_data") = true);
m.def("_xla_sync_live_tensors",
[](const std::string& device, const std::vector<std::string>& devices,
bool wait) {
NoGilSection nogil;
SyncLiveTensors(device, devices, wait);
},
py::arg("device") = "", py::arg("devices"), py::arg("wait") = true);
m.def("_xla_step_marker",
[](const std::string& device, const std::vector<std::string>& devices,
bool wait) {
NoGilSection nogil;
StepMarker(device, devices, wait);
},
py::arg("device") = "", py::arg("devices"), py::arg("wait") = true);
m.def("_xla_counter_value", [](const std::string& name) -> py::object {
xla::metrics::CounterData* data = xla::metrics::GetCounter(name);
return data != nullptr ? py::cast<int64_t>(data->Value()) : py::none();
});
m.def("_xla_metrics_report",
[]() { return xla::metrics::CreateMetricReport(); });
m.def(
"_xla_tensors_report",
[](size_t nodes_threshold, const std::string& device) {
return GetLiveTensorsReport(nodes_threshold, device);
},
py::arg("nodes_threshold") = 100, py::arg("device") = "");
m.def(
"_xla_set_use_full_mat_mul_precision",
[](bool use_full_mat_mul_precision) {
XlaHelpers::set_mat_mul_precision(use_full_mat_mul_precision
? xla::PrecisionConfig::HIGHEST
: xla::PrecisionConfig::DEFAULT);
},
py::arg("use_full_mat_mul_precision") = true);
m.def("_xla_tensors_report",
[](size_t nodes_threshold, const std::string& device) {
return GetLiveTensorsReport(nodes_threshold, device);
},
py::arg("nodes_threshold") = 100, py::arg("device") = "");
m.def("_xla_set_use_full_mat_mul_precision",
[](bool use_full_mat_mul_precision) {
XlaHelpers::set_mat_mul_precision(
use_full_mat_mul_precision ? xla::PrecisionConfig::HIGHEST
: xla::PrecisionConfig::DEFAULT);
},
py::arg("use_full_mat_mul_precision") = true);
}

} // namespace
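
The hunks above change only layout: under the OSS config the `m.def` call's arguments are bin-packed after the binding name rather than broken after the opening parenthesis. For readers unfamiliar with the construct being reformatted, here is a minimal, self-contained sketch of the same pybind11 pattern — a lambda body plus defaulted `py::arg` keywords — using a hypothetical `_example_sync` binding with no XLA dependencies:

```cpp
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>  // Converts std::vector/std::string arguments.

#include <string>
#include <vector>

namespace py = pybind11;

PYBIND11_MODULE(example, m) {
  // Same shape as the bindings above: lambda body, named keyword arguments,
  // defaults supplied via py::arg. The real bindings also enter a
  // NoGilSection to release the GIL before doing work.
  m.def("_example_sync",
        [](const std::vector<std::string>& devices, bool wait) {
          return wait ? devices.size() : 0;
        },
        py::arg("devices"), py::arg("wait") = true);
}
```

From Python this is callable as `example._example_sync(["cpu:0"])`, or with `wait=False` to skip waiting.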
1 change: 1 addition & 0 deletions torch_xla/csrc/matrix.cpp
@@ -1,4 +1,5 @@
#include "torch_xla/csrc/matrix.h"

#include "tensorflow/compiler/xla/client/lib/constants.h"
#include "tensorflow/compiler/xla/client/lib/matrix.h"
#include "torch_xla/csrc/helpers.h"
8 changes: 4 additions & 4 deletions torch_xla/csrc/ops/adaptive_avg_pool2d.cpp
@@ -1,4 +1,5 @@
#include "torch_xla/csrc/ops/adaptive_avg_pool2d.h"

#include "tensorflow/compiler/xla/xla_client/debug_macros.h"
#include "tensorflow/compiler/xla/xla_client/util.h"
#include "torch_xla/csrc/lowering_context.h"
@@ -26,10 +27,9 @@ xla::Shape NodeOutputShape(

AdaptiveAvgPool2d::AdaptiveAvgPool2d(const Value& input,
std::vector<xla::int64> output_size)
: Node(
ir::OpKind(at::aten::adaptive_avg_pool2d), {input},
[&]() { return NodeOutputShape(input, output_size); },
/*num_outputs=*/1, xla::util::MHash(output_size)),
: Node(ir::OpKind(at::aten::adaptive_avg_pool2d), {input},
[&]() { return NodeOutputShape(input, output_size); },
/*num_outputs=*/1, xla::util::MHash(output_size)),
output_size_(std::move(output_size)) {}

NodePtr AdaptiveAvgPool2d::Clone(OpList operands) const {
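
This hunk and the similar ones below all reformat one recurring construct: the `Node` base constructor takes a lambda that computes the output shape, capturing the constructor arguments by reference before they are moved into member fields. A compilable sketch of the pattern, using simplified stand-in `Shape` and `Node` types rather than the real torch_xla classes:

```cpp
#include <functional>
#include <utility>
#include <vector>

// Simplified stand-ins for the real torch_xla types.
struct Shape {
  std::vector<long> dims;
};

class Node {
 public:
  // The base constructor invokes the callback immediately, so it runs
  // before the derived class's member initializers execute.
  explicit Node(const std::function<Shape()>& shape_fn)
      : shape_(shape_fn()) {}

 private:
  Shape shape_;
};

class AdaptiveAvgPool2dSketch : public Node {
 public:
  explicit AdaptiveAvgPool2dSketch(std::vector<long> output_size)
      // Safe: the lambda's [&] capture is consumed by the base constructor
      // before std::move(output_size) below empties the argument.
      : Node([&]() { return Shape{output_size}; }),
        output_size_(std::move(output_size)) {}

 private:
  std::vector<long> output_size_;
};
```

The base-then-members construction order is what makes the by-reference capture safe here.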
7 changes: 3 additions & 4 deletions torch_xla/csrc/ops/arg_max.cpp
@@ -20,10 +20,9 @@ xla::Shape NodeOutputShape(const Value& input, xla::int64 dim, bool keepdim) {
} // namespace

ArgMax::ArgMax(const Value& input, xla::int64 dim, bool keepdim)
: Node(
ir::OpKind(at::aten::argmax), {input},
[&]() { return NodeOutputShape(input, dim, keepdim); },
/*num_outputs=*/1, xla::util::MHash(dim, keepdim)),
: Node(ir::OpKind(at::aten::argmax), {input},
[&]() { return NodeOutputShape(input, dim, keepdim); },
/*num_outputs=*/1, xla::util::MHash(dim, keepdim)),
dim_(dim),
keepdim_(keepdim) {}

7 changes: 3 additions & 4 deletions torch_xla/csrc/ops/arg_min.cpp
@@ -20,10 +20,9 @@ xla::Shape NodeOutputShape(const Value& input, xla::int64 dim, bool keepdim) {
} // namespace

ArgMin::ArgMin(const Value& input, xla::int64 dim, bool keepdim)
: Node(
ir::OpKind(at::aten::argmin), {input},
[&]() { return NodeOutputShape(input, dim, keepdim); },
/*num_outputs=*/1, xla::util::MHash(dim, keepdim)),
: Node(ir::OpKind(at::aten::argmin), {input},
[&]() { return NodeOutputShape(input, dim, keepdim); },
/*num_outputs=*/1, xla::util::MHash(dim, keepdim)),
dim_(dim),
keepdim_(keepdim) {}

11 changes: 5 additions & 6 deletions torch_xla/csrc/ops/as_strided.cpp
@@ -45,12 +45,11 @@ xla::Shape NodeOutputShape(const Value& input,

AsStrided::AsStrided(const Value& input, std::vector<xla::int64> size,
c10::optional<xla::int64> storage_offset)
: Node(
ir::OpKind(at::aten::as_strided), {input},
[&]() { return NodeOutputShape(input, size, storage_offset); },
/*num_outputs=*/1,
xla::util::MHash(size,
OptionalOr<xla::int64>(storage_offset, 0x311bd6))),
: Node(ir::OpKind(at::aten::as_strided), {input},
[&]() { return NodeOutputShape(input, size, storage_offset); },
/*num_outputs=*/1,
xla::util::MHash(size,
OptionalOr<xla::int64>(storage_offset, 0x311bd6))),
size_(std::move(size)),
storage_offset_(storage_offset) {}

4 changes: 2 additions & 2 deletions torch_xla/csrc/ops/as_strided.h
@@ -1,9 +1,9 @@
#pragma once

#include <vector>

#include <c10/util/Optional.h>

#include <vector>

#include "tensorflow/compiler/xla/types.h"
#include "torch_xla/csrc/ir.h"

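
The swap above is `IncludeBlocks: Regroup` at work: under the new config's IncludeCategories, `<c10/util/Optional.h>` matches `^<.*\.h>` (priority 1) while `<vector>` matches only `^<.*` (priority 2), so their groups trade places. The blank lines added in batch_norm.cpp and cross_replica_reduces.cpp above come from the same mechanism, which also puts a file's own "main" header in a leading block of its own. A hypothetical file after regrouping (not from this commit):

```cpp
#include "torch_xla/csrc/foo.h"  // Main header: its own block, first.

#include <c10/util/Optional.h>  // Priority 1: matches ^<.*\.h>

#include <vector>  // Priority 2: matches ^<.*

#include "torch_xla/csrc/helpers.h"  // Priority 3: everything else (.*)
```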
19 changes: 9 additions & 10 deletions torch_xla/csrc/ops/avg_pool_nd.cpp
@@ -50,16 +50,15 @@ AvgPoolNd::AvgPoolNd(const Value& input, xla::int64 spatial_dim_count,
std::vector<xla::int64> stride,
std::vector<xla::int64> padding, bool ceil_mode,
bool count_include_pad)
: Node(
ir::OpKind(AvgPoolNdSymbol(spatial_dim_count)), {input},
[&]() {
return NodeOutputShape(input, spatial_dim_count, kernel_size,
stride, padding, ceil_mode,
count_include_pad);
},
/*num_outputs=*/1,
xla::util::MHash(spatial_dim_count, kernel_size, stride, padding,
ceil_mode, count_include_pad)),
: Node(ir::OpKind(AvgPoolNdSymbol(spatial_dim_count)), {input},
[&]() {
return NodeOutputShape(input, spatial_dim_count, kernel_size,
stride, padding, ceil_mode,
count_include_pad);
},
/*num_outputs=*/1,
xla::util::MHash(spatial_dim_count, kernel_size, stride, padding,
ceil_mode, count_include_pad)),
spatial_dim_count_(spatial_dim_count),
kernel_size_(std::move(kernel_size)),
stride_(std::move(stride)),
19 changes: 9 additions & 10 deletions torch_xla/csrc/ops/avg_pool_nd_backward.cpp
@@ -49,16 +49,15 @@ AvgPoolNdBackward::AvgPoolNdBackward(
const Value& grad_output, const Value& input, xla::int64 spatial_dim_count,
std::vector<xla::int64> kernel_size, std::vector<xla::int64> stride,
std::vector<xla::int64> padding, bool ceil_mode, bool count_include_pad)
: Node(
OpKind(AvgNdBackwardSymbol(spatial_dim_count)), {grad_output, input},
[&]() {
return NodeOutputShape(grad_output, input, spatial_dim_count,
kernel_size, stride, padding, ceil_mode,
count_include_pad);
},
/*num_outputs=*/1,
xla::util::MHash(spatial_dim_count, kernel_size, stride, padding,
ceil_mode, count_include_pad)),
: Node(OpKind(AvgNdBackwardSymbol(spatial_dim_count)), {grad_output, input},
[&]() {
return NodeOutputShape(grad_output, input, spatial_dim_count,
kernel_size, stride, padding, ceil_mode,
count_include_pad);
},
/*num_outputs=*/1,
xla::util::MHash(spatial_dim_count, kernel_size, stride, padding,
ceil_mode, count_include_pad)),
spatial_dim_count_(spatial_dim_count),
kernel_size_(std::move(kernel_size)),
stride_(std::move(stride)),
(Diff truncated: the remaining changed files of the 57 in this commit are not shown.)