#0: [Bug Fix] Removed "using namespace std" from all our headers (#11036)
dmakoviichuk-tt authored Aug 2, 2024
1 parent 046271b commit 38c881b
Showing 59 changed files with 194 additions and 203 deletions.
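The rationale for the change: a `using namespace std;` at namespace scope in a header leaks the entire `std` namespace into every translation unit that includes that header, where it can flip overload resolution or create outright ambiguities far from the offending line. A minimal self-contained sketch of the failure mode (hypothetical names, not code from this repo):

```cpp
// With C++17, <cstddef> declares std::byte. If a header also injects
// `using namespace std;`, any project-local `byte` becomes ambiguous
// in every file that includes that header.
#include <cstddef>
using namespace std;  // what the removed header lines used to do

enum class byte : unsigned char {};  // fine on its own

int main() {
    // byte b{};  // error: reference to 'byte' is ambiguous (::byte vs std::byte)
    ::byte b{};   // includers are forced to qualify to dodge the leak
    (void)b;
}
```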
2 changes: 1 addition & 1 deletion tests/tests_common/sfpu_helper/sfpu_helper.hpp
@@ -74,7 +74,7 @@ float ref_identity(float x) {
return x;
}

-vector<uint32_t> sfpu(const vector<uint32_t> &src, std::function<float(float)> sfpu_func) {
+vector<uint32_t> sfpu(const std::vector<uint32_t> &src, std::function<float(float)> sfpu_func) {
vector<uint32_t> dst;

for (uint32_t el: src) {
4 changes: 2 additions & 2 deletions tests/tt_eager/ops/test_sliding_window_ops.cpp
@@ -61,8 +61,8 @@ uint32_t compare_conv_out_with_golden(
// It is ok to use pad_metadata since its correctness is validated in other test cases.
uint32_t validate_generate_halo_kernel_config(
tt::tt_metal::Device *device,
-const vector<std::pair<uint32_pair_t, uint32_pair_t>> &shard_boundaries,
-const tuple<vector<vector<uint16_t>>, vector<vector<uint16_t>>, vector<vector<uint16_t>>>
+const std::vector<std::pair<uint32_pair_t, uint32_pair_t>> &shard_boundaries,
+const std::tuple<vector<vector<uint16_t>>, std::vector<std::vector<uint16_t>>, std::vector<std::vector<uint16_t>>>
&halo_kernel_config,
const vector<bool> &pad_metadata,
bool remote_read = false,
4 changes: 1 addition & 3 deletions tests/tt_metal/test_utils/df/float32.hpp
@@ -7,8 +7,6 @@

#include "tt_metal/common/logger.hpp"

-using namespace std;
-
namespace tt::test_utils::df {

//! Custom type is supported as long as the custom type supports the following custom functions
@@ -45,7 +43,7 @@ class float32 {
bool operator!=(float32 rhs) { return uint32_data != rhs.uint32_data; }
};

-inline ostream& operator<<(ostream& os, const float32& val) {
+inline std::ostream& operator<<(std::ostream& os, const float32& val) {
os << val.to_packed();
return os;
}
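With the directive removed from float32.hpp, the operator signature must spell out `std::ostream`, but call sites don't change: argument-dependent lookup finds `operator<<` through the namespace of `float32`. A self-contained stand-in (not the repo's actual header) showing why unqualified streaming keeps working:

```cpp
#include <cstdint>
#include <iostream>

namespace df {  // stands in for tt::test_utils::df
struct float32 {
    uint32_t bits;
};
inline std::ostream& operator<<(std::ostream& os, const float32& v) {
    return os << v.bits;  // mirrors the to_packed() pattern above
}
}  // namespace df

int main() {
    df::float32 v{0x3f800000u};
    std::cout << v << '\n';  // ADL finds df::operator<<; no using-directive needed
}
```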
@@ -80,19 +80,19 @@ uint32_t get_l1_size(tt::ARCH arch);

double get_tt_npu_rpeak_tflops(tt::ARCH arch, CoreCoord grid_size, int tt_npu_clock);

-tuple<uint32_t, uint32_t, uint32_t> get_aligned_input_tile_num(uint32_t M, uint32_t N, uint32_t K);
+std::tuple<uint32_t, uint32_t, uint32_t> get_aligned_input_tile_num(uint32_t M, uint32_t N, uint32_t K);

uint32_t get_in0_block_w(
uint32_t per_core_Mt, uint32_t per_core_Nt, uint32_t Kt, uint32_t single_tile_size, uint32_t l1_size);

CoreCoord get_core_range(
uint32_t num_blocks_rows, uint32_t num_blocks_cols, uint32_t max_num_rows, uint32_t max_num_cols);

-tuple<MathFidelity, bool> get_compute_params(tt::ARCH arch);
+std::tuple<MathFidelity, bool> get_compute_params(tt::ARCH arch);

-tuple<uint32_t, uint32_t> get_out_subblock_params(uint32_t per_core_Mt, uint32_t per_core_Nt, uint32_t choice);
+std::tuple<uint32_t, uint32_t> get_out_subblock_params(uint32_t per_core_Mt, uint32_t per_core_Nt, uint32_t choice);

-tuple<uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t> get_all_buffers_addresses(
+std::tuple<uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t> get_all_buffers_addresses(
uint32_t per_core_Mt, uint32_t per_core_Nt, uint32_t in0_block_w, uint32_t single_tile_size);

std::vector<float> generate_fp32_random(uint32_t num_elems, int32_t rand_max_val);
@@ -711,7 +711,7 @@ double get_tt_npu_rpeak_tflops(tt::ARCH arch, CoreCoord grid_size, int tt_npu_cl
return rpeak_tflops;
}

-tuple<uint32_t, uint32_t, uint32_t> get_aligned_input_tile_num(uint32_t M, uint32_t N, uint32_t K) {
+std::tuple<uint32_t, uint32_t, uint32_t> get_aligned_input_tile_num(uint32_t M, uint32_t N, uint32_t K) {
auto align_to_tile = [](uint32_t value) -> uint32_t {
return ((value + (constants::TILE_WIDTH - 1)) / constants::TILE_WIDTH) * constants::TILE_WIDTH;
};
@@ -772,7 +772,7 @@ CoreCoord get_core_range(
return core_range;
}

-tuple<MathFidelity, bool> get_compute_params(tt::ARCH arch) {
+std::tuple<MathFidelity, bool> get_compute_params(tt::ARCH arch) {
MathFidelity math_fidelity = MathFidelity::HiFi4;
bool fp32_dest_acc_en = false;
if (arch == tt::ARCH::WORMHOLE || arch == tt::ARCH::WORMHOLE_B0) {
@@ -787,8 +787,8 @@ tuple<MathFidelity, bool> get_compute_params(tt::ARCH arch) {
return {math_fidelity, fp32_dest_acc_en};
}

-tuple<uint32_t, uint32_t> get_out_subblock_params(uint32_t per_core_Mt, uint32_t per_core_Nt, uint32_t choice = 0) {
-constexpr std::array<tuple<uint32_t, uint32_t>, 20> SUBBLOCK_HW_CHOICES = {{
+std::tuple<uint32_t, uint32_t> get_out_subblock_params(uint32_t per_core_Mt, uint32_t per_core_Nt, uint32_t choice = 0) {
+constexpr std::array<std::tuple<uint32_t, uint32_t>, 20> SUBBLOCK_HW_CHOICES = {{
{4, 2}, {2, 4}, {8, 1}, {1, 8}, {7, 1}, {1, 7}, {3, 2}, {2, 3}, {6, 1}, {1, 6},
{5, 1}, {1, 5}, {2, 2}, {4, 1}, {1, 4}, {3, 1}, {1, 3}, {2, 1}, {1, 2}, {1, 1},
}};
@@ -810,7 +810,7 @@ tuple<uint32_t, uint32_t> get_out_subblock_params(uint32_t per_core_Mt, uint32_t
return {1, 1};
}

-tuple<uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t> get_all_buffers_addresses(
+std::tuple<uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t> get_all_buffers_addresses(
uint32_t per_core_Mt, uint32_t per_core_Nt, uint32_t in0_block_w, uint32_t single_tile_size) {
uint32_t num_buffer = 2; // double buffering
uint32_t in0_cb_addr = L1_UNRESERVED_BASE;
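All of these helpers now return explicit `std::tuple`s of scalars. Assuming the project builds as C++17, callers can unpack them with structured bindings instead of `std::get<>`. A sketch that mirrors `get_aligned_input_tile_num` from the hunk above (`TILE_WIDTH == 32` is an assumption here; the repo takes it from its constants):

```cpp
#include <cstdint>
#include <iostream>
#include <tuple>

constexpr uint32_t TILE_WIDTH = 32;  // assumption, not taken from the repo

// Same signature and alignment logic as the benchmark helper above.
std::tuple<uint32_t, uint32_t, uint32_t> get_aligned_input_tile_num(uint32_t M, uint32_t N, uint32_t K) {
    auto align_to_tile = [](uint32_t value) -> uint32_t {
        return ((value + (TILE_WIDTH - 1)) / TILE_WIDTH) * TILE_WIDTH;
    };
    return {align_to_tile(M), align_to_tile(N), align_to_tile(K)};
}

int main() {
    // C++17 structured bindings unpack the tuple without std::get<> noise.
    auto [Ma, Na, Ka] = get_aligned_input_tile_num(500, 700, 1000);
    std::cout << Ma << ' ' << Na << ' ' << Ka << '\n';  // 512 704 1024
}
```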
8 changes: 4 additions & 4 deletions tests/tt_metal/tt_metal/perf_microbenchmark/dispatch/common.h
@@ -39,7 +39,7 @@ class DeviceData {
// 10 is a hack...bigger than any core_type
uint64_t base_data_addr[10];
uint64_t base_result_data_addr[10];
-unordered_map<CoreCoord, unordered_map<uint32_t, one_core_data_t>> all_data;
+std::unordered_map<CoreCoord, std::unordered_map<uint32_t, one_core_data_t>> all_data;
CoreCoord host_core;

// Validate a single core's worth of results vs expected
@@ -80,7 +80,7 @@ class DeviceData {
int size() { return amt_written; }
int size(CoreCoord core, int bank_id = 0) { return this->all_data[core][bank_id].data.size(); }

-unordered_map<CoreCoord, unordered_map<uint32_t, one_core_data_t>>& get_data() { return this->all_data; }
+std::unordered_map<CoreCoord, std::unordered_map<uint32_t, one_core_data_t>>& get_data() { return this->all_data; }

CoreType get_core_type(CoreCoord core) { return this->all_data[core][0].core_type; }
uint32_t size_at(CoreCoord core, int bank_id);
@@ -206,7 +206,7 @@ void DeviceData::prepopulate_dram(Device *device, uint32_t size_words) {

bool DeviceData::core_and_bank_present(CoreCoord core, uint32_t bank) {
if (this->all_data.find(core) != this->all_data.end()) {
-unordered_map<uint32_t, one_core_data_t>& core_data = this->all_data.find(core)->second;
+std::unordered_map<uint32_t, one_core_data_t>& core_data = this->all_data.find(core)->second;
if (core_data.find(bank) != core_data.end()) {
return true;
}
@@ -341,7 +341,7 @@ inline bool DeviceData::validate_one_core(Device *device,
const uint32_t start_index,
uint32_t result_addr) {
int fail_count = 0;
-const vector<uint32_t>& dev_data = one_core_data.data;
+const std::vector<uint32_t>& dev_data = one_core_data.data;
const vector<bool>& dev_valid = one_core_data.valid;
const CoreCoord logical_core = one_core_data.logical_core;
const CoreCoord phys_core = one_core_data.phys_core;
@@ -328,7 +328,7 @@ void add_prefetcher_debug_epilogue(vector<uint32_t>& cmds,

void add_prefetcher_cmd_to_hostq(vector<uint32_t>& cmds,
vector<uint32_t>& sizes,
-const vector<uint32_t>& payload,
+const std::vector<uint32_t>& payload,
size_t prior_end) {
uint32_t cmd_size_bytes = (cmds.size() - prior_end) * sizeof(uint32_t);
for (int i = 0; i < payload.size(); i++) {
@@ -274,7 +274,7 @@ int main(int argc, char** argv) {
}
}
}
-} catch (exception e) {
+} catch (std::exception e) {
test_fixture.TearDown();
return -1;
}
@@ -475,7 +475,7 @@ int main (int argc, char** argv) {
}
}
}
-} catch (exception e) {
+} catch (std::exception e) {
test_fixture.TearDown();
return -1;
}
@@ -291,7 +291,7 @@ int main(int argc, char** argv) {
}
}
}
-} catch (exception e) {
+} catch (std::exception e) {
test_fixture.TearDown();
return -1;
}
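A side note on the three `catch` hunks above: the commit qualifies the exception type, but the handlers still catch `std::exception` by value, which copies the exception and slices a thrown derived type down to the base. The long-standing idiom, offered here as a suggestion rather than a description of what this commit does, is to catch by const reference:

```cpp
#include <iostream>
#include <stdexcept>

int main() {
    try {
        throw std::runtime_error("dispatch failed");  // a derived exception type
    } catch (const std::exception& e) {  // const&: no copy, no slicing
        // With `catch (std::exception e)` the runtime_error would be sliced
        // to its std::exception base and what() would lose the real message.
        std::cerr << e.what() << '\n';   // prints "dispatch failed"
        return -1;
    }
}
```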
28 changes: 14 additions & 14 deletions tests/tt_metal/tt_metal/test_gold_impls.hpp
@@ -22,12 +22,12 @@ using std::uint16_t;
// src_vec is expected to be untilized
// result is also untilized
// TODO(AP) - move to gold header
-inline vector<uint16_t> gold_transpose_hc(std::vector<uint16_t> src_vec, vector<uint32_t> shape) {
-vector<uint32_t> shapeT{shape[0], shape[2], shape[1], shape[3]};
+inline std::vector<uint16_t> gold_transpose_hc(std::vector<uint16_t> src_vec, std::vector<uint32_t> shape) {
+std::vector<uint32_t> shapeT{shape[0], shape[2], shape[1], shape[3]};
TensAddr addr(shape);
TensAddr addrt(shapeT);

-vector<uint16_t> transposed(src_vec.size());
+std::vector<uint16_t> transposed(src_vec.size());
for (int n = 0; n < shape[0]; n++)
for (int c = 0; c < shape[1]; c++)
for (int h = 0; h < shape[2]; h++)
@@ -49,7 +49,7 @@ struct BcastDim {
};
// TODO(AP): fix the gap to match defines in llk_3c.h

-static const vector<Enum> all() { return { W, H, HW }; }
+static const std::vector<Enum> all() { return { W, H, HW }; }
};

struct BcastOp {
@@ -70,10 +70,10 @@ struct BcastOp {
// result is also untilized
// bcast_vals for hw mode is expected to have size 1
// bcast_vals for h or w mode is supposed to have h or w elements
-inline vector<uint16_t> gold_bcast_op(
-const vector<uint16_t>& src_vec,
-const vector<uint32_t>& shape,
-const vector<uint16_t>& bcast_vals,
+inline std::vector<uint16_t> gold_bcast_op(
+const std::vector<uint16_t>& src_vec,
+const std::vector<uint32_t>& shape,
+const std::vector<uint16_t>& bcast_vals,
BcastDim::Enum bcast_dim,
BcastOp::Enum bcast_op
) {
@@ -82,9 +82,9 @@ inline vector<uint16_t> gold_bcast_op(
TT_FATAL(bcast_dim == BcastDim::H ? bcast_vals.size() == N*C*W : true);
TT_FATAL(bcast_dim == BcastDim::HW ? bcast_vals.size() == N*C : true);

-vector<uint32_t> shape_dst{N, C, H, W};
+std::vector<uint32_t> shape_dst{N, C, H, W};
TensAddr addr(shape);
-vector<uint16_t> result(addr.numel());
+std::vector<uint16_t> result(addr.numel());
std::fill(result.begin(), result.end(), 0);
for (int n = 0; n < N; n++)
for (int c = 0; c < C; c++)
@@ -118,10 +118,10 @@ inline vector<uint16_t> gold_bcast_op(
// Basic gold batch matmul implementation.
// Returns C=A*B, A and B are row-major untilized
// Accumulates in FP32
-inline vector<uint16_t> gold_bmm(
-const vector<uint32_t> shapeA,
-const vector<uint16_t>& A,
-const vector<uint32_t>& shapeB,
+inline std::vector<uint16_t> gold_bmm(
+const std::vector<uint32_t> shapeA,
+const std::vector<uint16_t>& A,
+const std::vector<uint32_t>& shapeB,
const vector<uint16_t>& B,
bool acc16 = false
)
4 changes: 2 additions & 2 deletions tests/tt_metal/tt_metal/test_untilize_eltwise_binary.cpp
@@ -13,8 +13,8 @@

using namespace tt;

-inline vector<uint32_t> gold_standard_untilize(std::vector<uint32_t> src_vec, vector<uint32_t> shape) {
-vector<uint32_t> dst_vec;
+inline std::vector<uint32_t> gold_standard_untilize(std::vector<uint32_t> src_vec, std::vector<uint32_t> shape) {
+std::vector<uint32_t> dst_vec;

int num_rows = shape.at(0);
int num_cols = shape.at(1) / 2;
2 changes: 1 addition & 1 deletion tests/tt_metal/tt_metal/unit_tests/basic/device.cpp
@@ -76,7 +76,7 @@ bool dram_ping(
tt_metal::detail::ReadFromDeviceDRAMChannel(device, channel, dram_byte_address, byte_size, dest_channel_data);
pass &= (dest_channel_data == inputs);
if (not pass) {
cout << "Mismatch at Channel: " << channel << std::endl;
std::cout << "Mismatch at Channel: " << channel << std::endl;
}
}
return pass;
@@ -77,7 +77,7 @@ vector<uint32_t> generate_packed_sfpu_input(const unsigned int numel, const stri
}
}

-bool is_close_packed_sfpu_output(const vector<uint32_t>& vec_a, const vector<uint32_t>& vec_b, const string& op_name) {
+bool is_close_packed_sfpu_output(const std::vector<uint32_t>& vec_a, const std::vector<uint32_t>& vec_b, const string& op_name) {
if (op_name == "tanh") {
return is_close_packed_vectors<tt::test_utils::df::bfloat16, uint32_t>(
vec_a, vec_b, [&](const tt::test_utils::df::bfloat16& a, const tt::test_utils::df::bfloat16& b) { return is_close(a, b, 0.175f, 0.1f); });
@@ -151,9 +151,9 @@ bool stress_test_EnqueueWriteBuffer_and_EnqueueReadBuffer(
bool pass = true;
uint32_t num_pages_left = config.num_pages_total;

-vector<unique_ptr<Buffer>> buffers;
-vector<vector<uint32_t>> srcs;
-vector<vector<uint32_t>> dsts;
+std::vector<std::unique_ptr<Buffer>> buffers;
+std::vector<std::vector<uint32_t>> srcs;
+std::vector<std::vector<uint32_t>> dsts;
while (num_pages_left) {
uint32_t num_pages = std::min(rand() % (config.max_num_pages_per_buffer) + 1, num_pages_left);
num_pages_left -= num_pages;
@@ -170,7 +170,7 @@ bool stress_test_EnqueueWriteBuffer_and_EnqueueReadBuffer(
buftype = BufferType::L1;
}

-unique_ptr<Buffer> buf;
+std::unique_ptr<Buffer> buf;
try {
buf = std::make_unique<Buffer>(device, buf_size, config.page_size, buftype);
} catch (...) {
@@ -78,7 +78,7 @@ vector<uint32_t> generate_packed_sfpu_input(const unsigned int numel, const stri
}
}

-bool is_close_packed_sfpu_output(const vector<uint32_t>& vec_a, const vector<uint32_t>& vec_b, const string& op_name) {
+bool is_close_packed_sfpu_output(const std::vector<uint32_t>& vec_a, const std::vector<uint32_t>& vec_b, const string& op_name) {
if (op_name == "tanh") {
return is_close_packed_vectors<tt::test_utils::df::bfloat16, uint32_t>(
vec_a, vec_b, [&](const tt::test_utils::df::bfloat16& a, const tt::test_utils::df::bfloat16& b) { return is_close(a, b, 0.175f, 0.1f); });
@@ -21,8 +21,8 @@ bool test_EnqueueWriteBuffer_and_EnqueueReadBuffer_multi_queue(Device* device, v
for (const bool use_void_star_api: {true, false}) {

size_t buf_size = config.num_pages * config.page_size;
-vector<std::unique_ptr<Buffer>> buffers;
-vector<vector<uint32_t>> srcs;
+std::vector<std::unique_ptr<Buffer>> buffers;
+std::vector<std::vector<uint32_t>> srcs;
for (uint i = 0; i < cqs.size(); i++) {
buffers.push_back(std::make_unique<Buffer>(device, buf_size, config.page_size, config.buftype));
srcs.push_back(generate_arange_vector(buffers[i]->size()));
@@ -34,7 +34,7 @@
}

for (uint i = 0; i < cqs.size(); i++) {
-vector<uint32_t> result;
+std::vector<uint32_t> result;
if (use_void_star_api) {
result.resize(buf_size / sizeof(uint32_t));
EnqueueReadBuffer(cqs[i], *buffers[i], result.data(), true);
@@ -27,7 +27,7 @@ struct BufferStressTestConfig {
};


-inline vector<uint32_t> generate_arange_vector(uint32_t size_bytes, uint32_t start = 0) {
+inline std::vector<uint32_t> generate_arange_vector(uint32_t size_bytes, uint32_t start = 0) {
TT_FATAL(size_bytes % sizeof(uint32_t) == 0);
vector<uint32_t> src(size_bytes / sizeof(uint32_t), 0);
