
Commit

id_to_counter => buffer_id_to_counter and tensor_id_to_counter
mbezuljTT committed Nov 26, 2024
1 parent b6177f3 commit a01fecc
Showing 2 changed files with 23 additions and 17 deletions.
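
A plausible reading of the change, shown as a minimal standalone sketch (not part of the commit; the IDs below are invented): with a single id_to_counter keyed by both buffer unique IDs and tensor IDs, a numeric collision between the two independent ID spaces lets one entry shadow the other, so a tensor or buffer could be mistaken for an already-tracked one. Keeping buffer_id_to_counter and tensor_id_to_counter separate, as this commit does, removes that possibility.

// Standalone illustration only -- not the ttnn sources. The IDs are
// hypothetical; GraphProcessor derives them from Buffer::unique_id() and
// Tensor::tensor_id respectively.
#include <cstdint>
#include <iostream>
#include <unordered_map>

int main() {
    // Suppose a buffer's unique_id and an unrelated tensor's tensor_id both
    // happen to be 7 -- the two ID spaces are independent, so this can occur.
    const std::int64_t buffer_unique_id = 7;
    const std::int64_t tensor_id = 7;

    // Before: one shared map keyed by both kinds of ID.
    std::unordered_map<std::int64_t, int> id_to_counter;
    id_to_counter[buffer_unique_id] = 3;  // buffer already tracked as vertex 3

    // The tensor lookup hits the buffer's entry, so the tensor would be
    // treated as already tracked (and wired to the wrong graph vertex).
    std::cout << "shared map, tensor already seen: " << std::boolalpha
              << (id_to_counter.count(tensor_id) > 0) << '\n';  // true

    // After: one map per ID space, as introduced by this commit.
    std::unordered_map<std::int64_t, int> buffer_id_to_counter;
    std::unordered_map<std::int64_t, int> tensor_id_to_counter;
    buffer_id_to_counter[buffer_unique_id] = 3;

    std::cout << "split maps, tensor already seen: "
              << (tensor_id_to_counter.count(tensor_id) > 0) << '\n';  // false
    return 0;
}

In graph_processor.cpp the lookups in track_allocate(), track_deallocate(), add_tensor(), add_buffer(), and begin_capture() are switched to the corresponding map, so the counter-memoization behaviour is preserved within each ID space.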
37 changes: 21 additions & 16 deletions ttnn/cpp/ttnn/graph/graph_processor.cpp
@@ -95,9 +95,11 @@ GraphProcessor::GraphProcessor(RunMode mode) : run_mode(mode) {
}
void GraphProcessor::track_allocate(const tt::tt_metal::Buffer* buffer) {
const std::lock_guard<std::mutex> lock(mutex);
auto buf_id = add_buffer(buffer);
auto buffer_id = add_buffer(buffer);

// auto alloc_id = reinterpret_cast<std::uintptr_t>(buffer);
// auto buffer_id = buffer->unique_id();

auto alloc_id = reinterpret_cast<std::uintptr_t>(buffer);
auto counter = graph.size();

std::unordered_map<std::string, std::string> params = {
@@ -113,15 +115,15 @@ void GraphProcessor::track_allocate(const tt::tt_metal::Buffer* buffer) {
.counter = counter,
.node_type = kNodeBufferAllocate,
.params = params,
.connections = {buf_id}
.connections = {buffer_id}
});
graph[current_op_id.top()].connections.push_back(counter);
}
}

void GraphProcessor::track_deallocate(tt::tt_metal::Buffer* buffer) {
const std::lock_guard<std::mutex> lock(mutex);
auto buffer_idx = add_buffer(buffer);
auto buffer_id = add_buffer(buffer);
auto counter = graph.size();
std::unordered_map<std::string, std::string> params = {
{kSize, std::to_string(buffer->size())},
@@ -135,7 +137,7 @@ void GraphProcessor::track_deallocate(tt::tt_metal::Buffer* buffer) {
.counter = counter,
.node_type = kNodeBufferDeallocate,
.params = params,
.connections = {buffer_idx}
.connections = {buffer_id}
});
graph[current_op_id.top()].connections.push_back(counter);
}
@@ -286,31 +288,33 @@ int GraphProcessor::add_tensor(const Tensor& t) {
} else {
tensor_id = t.tensor_id.value();
}
auto tensor_counter = id_to_counter.count(tensor_id) > 0 ? id_to_counter[tensor_id] : graph.size();
auto tensor_counter = tensor_id_to_counter.count(tensor_id) > 0 ? tensor_id_to_counter[tensor_id] : graph.size();
auto shape = t.get_shape();
std::unordered_map<std::string, std::string> params = {
{kShape, fmt::format("{}", shape)},
{kTensorId, fmt::format("{}", tensor_id)},
};

if (id_to_counter.count(tensor_id) == 0) {
if (tensor_id_to_counter.count(tensor_id) == 0) {
graph.push_back(Vertex{.counter = tensor_counter, .node_type = kNodeTensor, .params = params, .connections = {}});
id_to_counter[tensor_id] = tensor_counter;
tensor_id_to_counter[tensor_id] = tensor_counter;
}

if (buffer) {
auto buffer_idx = add_buffer(buffer);
graph[buffer_idx].connections.push_back(tensor_counter);
auto buffer_id = add_buffer(buffer);
graph[buffer_id].connections.push_back(tensor_counter);
} else {
tt::log_info("Tensor doesn't have buffer, but storage is {}", demangle(get_type_in_var(t.get_storage()).name()));
}
return tensor_counter;
}

int GraphProcessor::add_buffer(const tt::tt_metal::Buffer* buffer) {
auto buffer_alloc_id = buffer->unique_id();
auto counter = id_to_counter.count(buffer_alloc_id) > 0 ? id_to_counter[buffer_alloc_id] : graph.size();
if (id_to_counter.count(buffer_alloc_id) == 0) {
auto buffer_id = buffer->unique_id();
auto counter = buffer_id_to_counter.count(buffer_id) > 0 ? buffer_id_to_counter[buffer_id] : graph.size();

std::cout << "# " << counter << " " << buffer_id << std::endl;
if (buffer_id_to_counter.count(buffer_id) == 0) {
std::unordered_map<std::string, std::string> params = {
{kSize, std::to_string(buffer->size())},
{kType, buffer->is_dram() ? "DRAM" : "L1"},
Expand All @@ -324,10 +328,10 @@ int GraphProcessor::add_buffer(const tt::tt_metal::Buffer* buffer) {
.connections = {}
});
graph[current_op_id.top()].connections.push_back(counter);
id_to_counter[buffer_alloc_id] = counter;
buffer_id_to_counter[buffer_id] = counter;
return counter;
}
return id_to_counter[buffer_alloc_id];
return buffer_id_to_counter[buffer_id];
}


@@ -428,7 +432,8 @@ void GraphProcessor::end_function_process_optional_tensor(const std::any& any_va
void GraphProcessor::begin_capture(RunMode mode) {
const std::lock_guard<std::mutex> lock(mutex);
graph.clear();
id_to_counter.clear();
buffer_id_to_counter.clear();
tensor_id_to_counter.clear();
graph.push_back(Vertex{
.counter = 0,
.node_type = kNodeCaptureStart,
3 changes: 2 additions & 1 deletion ttnn/cpp/ttnn/graph/graph_processor.hpp
@@ -73,7 +73,8 @@ namespace ttnn::graph {
std::mutex mutex;
RunMode run_mode = RunMode::NORMAL;
std::stack<int> current_op_id;
std::unordered_map<std::int64_t, int> id_to_counter;
std::unordered_map<std::int64_t, int> buffer_id_to_counter;
std::unordered_map<std::int64_t, int> tensor_id_to_counter;
int last_finished_op_id = -1;
std::vector<Vertex> graph;
std::unordered_map<std::type_index, ProcessFunc> begin_function_any_map;

0 comments on commit a01fecc
