diff --git a/common/utils.hpp b/common/utils.hpp
new file mode 100644
index 00000000..b8cba9f5
--- /dev/null
+++ b/common/utils.hpp
@@ -0,0 +1,31 @@
+/*
+ * SPDX-FileCopyrightText: (c) 2024 Tenstorrent Inc.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#pragma once
+
+#include <filesystem>
+#include <string>
+
+namespace tt::umd::utils {
+
+std::string get_abs_path(std::string path) {
+    // Note that __FILE__ might be resolved at compile time to an absolute or relative address, depending on the
+    // compiler.
+    std::filesystem::path current_file_path = std::filesystem::path(__FILE__);
+    std::filesystem::path umd_root;
+    if (current_file_path.is_absolute()) {
+        umd_root = current_file_path.parent_path().parent_path();
+    } else {
+        std::filesystem::path umd_root_relative =
+            std::filesystem::relative(std::filesystem::path(__FILE__).parent_path().parent_path(), "../");
+        umd_root = std::filesystem::canonical(umd_root_relative);
+    }
+    std::filesystem::path abs_path = umd_root / path;
+    return abs_path.string();
+}
+
+}  // namespace tt::umd::utils
diff --git a/device/cluster.cpp b/device/cluster.cpp
index e73af023..72378b13 100644
--- a/device/cluster.cpp
+++ b/device/cluster.cpp
@@ -37,6 +37,8 @@
 #include
 #include
+#include "tt_arch_types.h"
+#include "tt_cluster_descriptor.h"
 #include "yaml-cpp/yaml.h"
 
 #include "common/logger.hpp"
@@ -103,7 +105,7 @@ void size_buffer_to_capacity(std::vector &data_buf, std::size_t size_in_bytes
 
 // TODO: To be removed when tt_device is removed
-tt_device::tt_device(const std::string& sdesc_path) : soc_descriptor_per_chip({}) {
+tt_device::tt_device() : soc_descriptor_per_chip({}) {
 }
 
 tt_device::~tt_device() {
@@ -300,30 +302,12 @@ std::unordered_map Cluster::get_harvesting_masks_for_soc_de
     return default_harvesting_masks;
 }
 
-Cluster::Cluster(const std::string &sdesc_path, const std::string &ndesc_path, const std::set<chip_id_t> &target_devices,
-                 const uint32_t &num_host_mem_ch_per_mmio_device, const bool skip_driver_allocs,
-                 const bool clean_system_resources, bool perform_harvesting, std::unordered_map<chip_id_t, uint32_t> simulated_harvesting_masks) : tt_device(sdesc_path) {
-    std::unordered_set<chip_id_t> target_mmio_device_ids;
-    target_devices_in_cluster = target_devices;
-    arch_name = tt_SocDescriptor(sdesc_path).arch;
-    perform_harvesting_on_sdesc = perform_harvesting;
-
-    auto available_device_ids = detect_available_device_ids();
-    m_num_pci_devices = available_device_ids.size();
-
-    if (!skip_driver_allocs) {
-        log_info(LogSiliconDriver, "Detected {} PCI device{} : {}", m_num_pci_devices, (m_num_pci_devices > 1) ? "s":"", available_device_ids);
-        log_debug(LogSiliconDriver, "Passed target devices: {}", target_devices);
-    }
+void Cluster::construct_cluster(const std::string& sdesc_path, const uint32_t &num_host_mem_ch_per_mmio_device, const bool skip_driver_allocs,
+                                const bool clean_system_resources, bool perform_harvesting, std::unordered_map<chip_id_t, uint32_t> simulated_harvesting_masks) {
 
-    std::string cluster_descriptor_path = ndesc_path;
-    if (cluster_descriptor_path == "") {
-        cluster_descriptor_path = tt_ClusterDescriptor::get_cluster_descriptor_file_path();
-    }
-
-    ndesc = tt_ClusterDescriptor::create_from_yaml(cluster_descriptor_path);
-
-    for (auto &d: target_devices){
+    std::unordered_set<chip_id_t> target_mmio_device_ids;
+    for (auto &d: target_devices_in_cluster){
+        log_assert(ndesc->get_all_chips().find(d) != ndesc->get_all_chips().end(), "Target device {} not present in current cluster!", d);
         if (ndesc->is_chip_mmio_capable(d)){
             target_mmio_device_ids.insert(d);
         }
@@ -357,7 +341,7 @@ Cluster::Cluster(const std::string &sdesc_path, const std::string &ndesc_path, c
     translation_tables_en = false;
 
     for(auto& masks : harvesting_masks) {
-        if(target_devices.find(masks.first) != target_devices.end()) {
+        if(target_devices_in_cluster.find(masks.first) != target_devices_in_cluster.end()) {
             harvested_rows_per_target[masks.first] = get_harvested_noc_rows(masks.second);
             noc_translation_enabled_for_chip[masks.first] = noc_translation_enabled.at(masks.first);
             num_rows_harvested.insert({masks.first, std::bitset<32>(masks.second).count()});
@@ -386,7 +370,7 @@ Cluster::Cluster(const std::string &sdesc_path, const std::string &ndesc_path, c
     }
     else if(arch_name == tt::ARCH::BLACKHOLE) {
         // Default harvesting info for Blackhole, describing no harvesting
-        for(auto chip_id = target_devices.begin(); chip_id != target_devices.end(); chip_id++){
+        for(auto chip_id = target_devices_in_cluster.begin(); chip_id != target_devices_in_cluster.end(); chip_id++){
             harvested_rows_per_target[*chip_id] = 0; //get_harvested_noc_rows_for_chip(*chip_id);
             num_rows_harvested.insert({*chip_id, 0}); // Only set for broadcast TLB to get RISCS out of reset. We want all rows to have a reset signal sent.
             if(harvested_rows_per_target[*chip_id]) {
@@ -396,7 +380,7 @@ Cluster::Cluster(const std::string &sdesc_path, const std::string &ndesc_path, c
     }
    else if(arch_name == tt::ARCH::GRAYSKULL) {
        // Multichip harvesting is supported for GS.
-        for(auto chip_id = target_devices.begin(); chip_id != target_devices.end(); chip_id++){
+        for(auto chip_id = target_devices_in_cluster.begin(); chip_id != target_devices_in_cluster.end(); chip_id++){
            harvested_rows_per_target[*chip_id] = get_harvested_noc_rows_for_chip(*chip_id);
            num_rows_harvested.insert({*chip_id, 0}); // Only set for broadcast TLB to get RISCS out of reset. We want all rows to have a reset signal sent.
            if(harvested_rows_per_target[*chip_id]) {
@@ -407,7 +391,7 @@ Cluster::Cluster(const std::string &sdesc_path, const std::string &ndesc_path, c
 
     if(simulated_harvesting_masks.size()) {
         performed_harvesting = true;
-        for (auto device_id = target_devices.begin(); device_id != target_devices.end(); device_id++) {
+        for (auto device_id = target_devices_in_cluster.begin(); device_id != target_devices_in_cluster.end(); device_id++) {
             log_assert(simulated_harvesting_masks.find(*device_id) != simulated_harvesting_masks.end(), "Could not find harvesting mask for device_id {}", *device_id);
             if(arch_name == tt::ARCH::GRAYSKULL) {
                 if ((simulated_harvesting_masks.at(*device_id) & harvested_rows_per_target[*device_id]) != harvested_rows_per_target[*device_id]) {
@@ -456,7 +440,94 @@ Cluster::Cluster(const std::string &sdesc_path, const std::string &ndesc_path, c
 
     // Default initialize noc_params based on detected arch
     noc_params = architecture_implementation->get_noc_params();
+}
+
+Cluster::Cluster(const uint32_t &num_host_mem_ch_per_mmio_device, const bool skip_driver_allocs,
+                 const bool clean_system_resources, bool perform_harvesting, std::unordered_map<chip_id_t, uint32_t> simulated_harvesting_masks) : tt_device() {
+    // TODO: this should be fetched through ClusterDescriptor
+    auto available_device_ids = detect_available_device_ids();
+    m_num_pci_devices = available_device_ids.size();
+
+    int physical_device_id = available_device_ids[0];
+    // TODO: remove logical_device_id
+    PCIDevice pci_device (physical_device_id, 0);
+    tt::ARCH device_arch = pci_device.get_arch();
+
+    std::string sdesc_path = tt_SocDescriptor::get_soc_descriptor_path(device_arch);
+
+    arch_name = tt_SocDescriptor(sdesc_path).arch;
+    perform_harvesting_on_sdesc = perform_harvesting;
+
+    if (!skip_driver_allocs) {
+        log_info(LogSiliconDriver, "Detected {} PCI device{} : {}", m_num_pci_devices, (m_num_pci_devices > 1) ? "s":"", available_device_ids);
+        log_debug(LogSiliconDriver, "Passed target devices: {}", target_devices);
+    }
+
+    std::string ndesc_path = tt_ClusterDescriptor::get_cluster_descriptor_file_path();
+    ndesc = tt_ClusterDescriptor::create_from_yaml(ndesc_path);
+
+    std::set<chip_id_t> target_devices;
+    for(const chip_id_t &d : ndesc->get_all_chips()) {
+        target_devices.insert(d);
+    }
+    target_devices_in_cluster = target_devices;
+
+    construct_cluster(sdesc_path, num_host_mem_ch_per_mmio_device, skip_driver_allocs, clean_system_resources, perform_harvesting, simulated_harvesting_masks);
+}
+
+Cluster::Cluster(const std::set<chip_id_t> &target_devices, const uint32_t &num_host_mem_ch_per_mmio_device, const bool skip_driver_allocs,
+                 const bool clean_system_resources, bool perform_harvesting, std::unordered_map<chip_id_t, uint32_t> simulated_harvesting_masks) : tt_device() {
+    // TODO: this should be fetched through ClusterDescriptor
+    auto available_device_ids = detect_available_device_ids();
+    m_num_pci_devices = available_device_ids.size();
+
+    int physical_device_id = available_device_ids[0];
+    // TODO: remove logical_device_id
+    PCIDevice pci_device (physical_device_id, 0);
+    tt::ARCH device_arch = pci_device.get_arch();
+
+    std::string sdesc_path = tt_SocDescriptor::get_soc_descriptor_path(device_arch);
+
+    arch_name = tt_SocDescriptor(sdesc_path).arch;
+    perform_harvesting_on_sdesc = perform_harvesting;
+
+    if (!skip_driver_allocs) {
+        log_info(LogSiliconDriver, "Detected {} PCI device{} : {}", m_num_pci_devices, (m_num_pci_devices > 1) ? "s":"", available_device_ids);
+        log_debug(LogSiliconDriver, "Passed target devices: {}", target_devices);
+    }
+
+    std::string ndesc_path = tt_ClusterDescriptor::get_cluster_descriptor_file_path();
+    ndesc = tt_ClusterDescriptor::create_from_yaml(ndesc_path);
+
+    target_devices_in_cluster = target_devices;
+
+    construct_cluster(sdesc_path, num_host_mem_ch_per_mmio_device, skip_driver_allocs, clean_system_resources, perform_harvesting, simulated_harvesting_masks);
+}
+
+Cluster::Cluster(const std::string &sdesc_path, const std::string &ndesc_path, const std::set<chip_id_t> &target_devices,
+                 const uint32_t &num_host_mem_ch_per_mmio_device, const bool skip_driver_allocs,
+                 const bool clean_system_resources, bool perform_harvesting, std::unordered_map<chip_id_t, uint32_t> simulated_harvesting_masks) : tt_device() {
+    // TODO: this should be fetched through ClusterDescriptor
+    auto available_device_ids = detect_available_device_ids();
+    m_num_pci_devices = available_device_ids.size();
+
+    target_devices_in_cluster = target_devices;
+    arch_name = tt_SocDescriptor(sdesc_path).arch;
+    perform_harvesting_on_sdesc = perform_harvesting;
+
+    if (!skip_driver_allocs) {
+        log_info(LogSiliconDriver, "Detected {} PCI device{} : {}", m_num_pci_devices, (m_num_pci_devices > 1) ? "s":"", available_device_ids);
+        log_debug(LogSiliconDriver, "Passed target devices: {}", target_devices);
+    }
+
+    std::string cluster_descriptor_path = ndesc_path;
+    if (cluster_descriptor_path == "") {
+        cluster_descriptor_path = tt_ClusterDescriptor::get_cluster_descriptor_file_path();
+    }
+
+    ndesc = tt_ClusterDescriptor::create_from_yaml(cluster_descriptor_path);
+
+    construct_cluster(sdesc_path, num_host_mem_ch_per_mmio_device, skip_driver_allocs, clean_system_resources, perform_harvesting, simulated_harvesting_masks);
 }
 
 void Cluster::configure_active_ethernet_cores_for_mmio_device(chip_id_t mmio_chip, const std::unordered_set<tt_xy_pair>& active_eth_cores_per_chip) {
diff --git a/device/cluster.h b/device/cluster.h
index f2cb6f94..b5caaa85 100644
--- a/device/cluster.h
+++ b/device/cluster.h
@@ -220,7 +220,7 @@ struct tt_device_params {
 class tt_device
 {
     public:
-    tt_device(const std::string& sdesc_path);
+    tt_device();
     virtual ~tt_device();
     // Setup/Teardown Functions
     /**
@@ -606,10 +606,10 @@ namespace tt::umd {
  */
 class Cluster: public tt_device
 {
-    public:
+public:
     // Constructor
     /**
-     * Silicon Driver constructor.
+     * Cluster constructor.
      *
     * @param sdesc_path SOC descriptor specifying single chip.
     * @param ndesc_path Network Descriptor specifying the network topology of the system.
@@ -624,6 +624,32 @@ class Cluster: public tt_device
             const uint32_t &num_host_mem_ch_per_mmio_device = 1, const bool skip_driver_allocs = false,
             const bool clean_system_resources = false, bool perform_harvesting = true, std::unordered_map<chip_id_t, uint32_t> simulated_harvesting_masks = {});
 
+    /**
+     * Cluster constructor. This constructor should be used to work towards removing all
+     * of the params from the constructor of tt_SiliconDevice (to become Cluster).
+     *
+     * @param num_host_mem_ch_per_mmio_device Requested number of host channels (hugepages).
+     * @param skip_driver_allocs
+     * @param clean_system_resources Specifies if host state from previous runs needs to be cleaned up.
+     * @param perform_harvesting Allow the driver to modify the SOC descriptors per chip.
+     * @param simulated_harvesting_masks
+     */
+    Cluster(const uint32_t &num_host_mem_ch_per_mmio_device = 1, const bool skip_driver_allocs = false,
+            const bool clean_system_resources = false, bool perform_harvesting = true, std::unordered_map<chip_id_t, uint32_t> simulated_harvesting_masks = {});
+
+    /**
+     * Cluster constructor. This constructor should be used to target specific devices in a cluster.
+     *
+     * @param target_devices Devices to target.
+     * @param num_host_mem_ch_per_mmio_device Requested number of host channels (hugepages).
+     * @param skip_driver_allocs
+     * @param clean_system_resources Specifies if host state from previous runs needs to be cleaned up.
+     * @param perform_harvesting Allow the driver to modify the SOC descriptors per chip.
+     * @param simulated_harvesting_masks
+     */
+    Cluster(const std::set<chip_id_t> &target_devices, const uint32_t &num_host_mem_ch_per_mmio_device = 1, const bool skip_driver_allocs = false,
+            const bool clean_system_resources = false, bool perform_harvesting = true, std::unordered_map<chip_id_t, uint32_t> simulated_harvesting_masks = {});
+
     //Setup/Teardown Functions
     virtual std::unordered_map<chip_id_t, tt_SocDescriptor>& get_virtual_soc_descriptors();
     virtual void set_device_l1_address_params(const tt_device_l1_address_params& l1_address_params_);
@@ -710,7 +736,7 @@ class Cluster: public tt_device
     // Destructor
     virtual ~Cluster ();
 
-    private:
+private:
     // Helper functions
     // Startup + teardown
     void create_device(const std::unordered_set<chip_id_t> &target_mmio_device_ids, const uint32_t &num_host_mem_ch_per_mmio_device, const bool skip_driver_allocs, const bool clean_system_resources);
@@ -771,6 +797,9 @@ class Cluster: public tt_device
     // This functions has to be called for local chip, and then it will wait for all connected remote chips to flush.
     void wait_for_connected_non_mmio_flush(chip_id_t chip_id);
 
+    void construct_cluster(const std::string& sdesc_path, const uint32_t &num_host_mem_ch_per_mmio_device, const bool skip_driver_allocs,
+                           const bool clean_system_resources, bool perform_harvesting, std::unordered_map<chip_id_t, uint32_t> simulated_harvesting_masks);
+
     // State variables
     tt_device_dram_address_params dram_address_params;
     tt_device_l1_address_params l1_address_params;
diff --git a/device/mockup/tt_mockup_device.hpp b/device/mockup/tt_mockup_device.hpp
index 2d888d93..e6085b39 100644
--- a/device/mockup/tt_mockup_device.hpp
+++ b/device/mockup/tt_mockup_device.hpp
@@ -14,7 +14,7 @@
 class tt_MockupDevice : public tt_device {
    public:
-    tt_MockupDevice(const std::string& sdesc_path) : tt_device(sdesc_path) {
+    tt_MockupDevice(const std::string& sdesc_path) : tt_device() {
         soc_descriptor_per_chip.emplace(0, tt_SocDescriptor(sdesc_path));
         std::set<chip_id_t> target_devices = {0};
     }
diff --git a/device/simulation/tt_simulation_device.cpp b/device/simulation/tt_simulation_device.cpp
index e3909ecc..a77a8ad3 100644
--- a/device/simulation/tt_simulation_device.cpp
+++ b/device/simulation/tt_simulation_device.cpp
@@ -48,7 +48,7 @@ void print_flatbuffer(const DeviceRequestResponse *buf){
     std::cout << std::endl;
 }
 
-tt_SimulationDevice::tt_SimulationDevice(const std::string &sdesc_path) : tt_device(sdesc_path){
+tt_SimulationDevice::tt_SimulationDevice(const std::string &sdesc_path) : tt_device(){
     log_info(tt::LogEmulationDriver, "Instantiating simulation device");
     soc_descriptor_per_chip.emplace(0, tt_SocDescriptor(sdesc_path));
     std::set<chip_id_t> target_devices = {0};
diff --git a/device/tt_cluster_descriptor.h b/device/tt_cluster_descriptor.h
index a51e6ac4..56ec9393 100644
--- a/device/tt_cluster_descriptor.h
+++ b/device/tt_cluster_descriptor.h
@@ -33,10 +33,10 @@ enum BoardType : uint32_t {
 
 class tt_ClusterDescriptor {
 
-  private:
+private:
     int get_ethernet_link_coord_distance(const eth_coord_t &location_a, const eth_coord_t &location_b) const;
 
-  protected:
+protected:
     std::unordered_map<chip_id_t, std::unordered_map<ethernet_channel_t, std::tuple<chip_id_t, ethernet_channel_t> > > ethernet_connections;
     std::unordered_map<chip_id_t, eth_coord_t> chip_locations;
@@ -73,7 +73,7 @@ class tt_ClusterDescriptor {
 
     void fill_chips_grouped_by_closest_mmio();
 
-  public:
+public:
     tt_ClusterDescriptor() = default;
     tt_ClusterDescriptor(const tt_ClusterDescriptor&) = default;
diff --git a/device/tt_device.cpp b/device/tt_device.cpp
new file mode 100644
index 00000000..9df2f392
--- /dev/null
+++ b/device/tt_device.cpp
@@ -0,0 +1,32 @@
+// SPDX-FileCopyrightText: (c) 2023 Tenstorrent Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+
+#ifdef TT_DEBUG_LOGGING
+#define DEBUG_LOG(str) do { std::cout << str << std::endl; } while( false )
+#else
+#define DEBUG_LOG(str) ((void)0)
+#endif
+
+#include "tt_device.h"
+#include "device/tt_cluster_descriptor_types.h"
+#include
+#include
+#include
+#include
+#include
+#include "yaml-cpp/yaml.h"
+
+////////
+// Device base
+////////
+tt_device::tt_device() : soc_descriptor_per_chip({}) {
+}
+
+tt_device::~tt_device() {
+}
+
+const tt_SocDescriptor& tt_device::get_soc_descriptor(chip_id_t chip_id) const {
+    return soc_descriptor_per_chip.at(chip_id);
+}
diff --git a/device/tt_soc_descriptor.cpp b/device/tt_soc_descriptor.cpp
index 9a572420..74c35e59 100644
--- a/device/tt_soc_descriptor.cpp
+++ b/device/tt_soc_descriptor.cpp
@@ -5,6 +5,8 @@
 #include "yaml-cpp/yaml.h"
 #include "tt_soc_descriptor.h"
 
+#include "common/utils.hpp"
+
 #include
 #include
 #include
@@ -273,6 +275,22 @@ bool tt_SocDescriptor::is_ethernet_core(const tt_xy_pair &core) const {
     return this->ethernet_core_channel_map.find(core) != ethernet_core_channel_map.end();
 }
 
+std::string tt_SocDescriptor::get_soc_descriptor_path(tt::ARCH arch) {
+    switch (arch) {
+        case tt::ARCH::GRAYSKULL:
+            // TODO: this path needs to be changed to point to soc descriptors outside of tests directory.
+            return tt::umd::utils::get_abs_path("tests/soc_descs/grayskull_10x12.yaml");
+        case tt::ARCH::WORMHOLE_B0:
+            // TODO: this path needs to be changed to point to soc descriptors outside of tests directory.
+            return tt::umd::utils::get_abs_path("tests/soc_descs/wormhole_b0_8x10.yaml");
+        case tt::ARCH::BLACKHOLE:
+            // TODO: this path needs to be changed to point to soc descriptors outside of tests directory.
+            return tt::umd::utils::get_abs_path("tests/soc_descs/blackhole_140_arch_no_eth.yaml");
+        default:
+            throw std::runtime_error("Invalid architecture");
+    }
+}
+
 std::ostream &operator<<(std::ostream &out, const tt::ARCH &arch_name) {
     if (arch_name == tt::ARCH::Invalid) {
         out << "none";
diff --git a/device/tt_soc_descriptor.h b/device/tt_soc_descriptor.h
index 372d0a29..e0529570 100644
--- a/device/tt_soc_descriptor.h
+++ b/device/tt_soc_descriptor.h
@@ -189,11 +189,14 @@ class tt_SocDescriptor {
 
     void perform_harvesting(std::size_t harvesting_mask);
 
+    static std::string get_soc_descriptor_path(tt::ARCH arch);
+
 private:
-    std::unique_ptr<CoordinateManager> coordinate_manager = nullptr;
     void create_coordinate_manager(std::size_t harvesting_mask);
     void load_core_descriptors_from_device_descriptor(YAML::Node &device_descriptor_yaml);
     void load_soc_features_from_device_descriptor(YAML::Node &device_descriptor_yaml);
+
+    std::unique_ptr<CoordinateManager> coordinate_manager = nullptr;
 };
 
 // Allocates a new soc descriptor on the heap. Returns an owning pointer.
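Illustrative note (not part of the patch): the new static lookup ties the per-arch default SOC descriptor to the repository root resolved by `tt::umd::utils::get_abs_path()`. A minimal sketch of calling it is below; the include path is an assumption.

```cpp
// Sketch only: resolve the bundled default SOC descriptor for a detected arch.
#include <string>

#include "device/tt_soc_descriptor.h"  // include path assumed

std::string default_sdesc_for(tt::ARCH arch) {
    // Returns e.g. "<umd_root>/tests/soc_descs/wormhole_b0_8x10.yaml" for WORMHOLE_B0;
    // throws std::runtime_error for an unrecognized arch.
    return tt_SocDescriptor::get_soc_descriptor_path(arch);
}
```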
diff --git a/tests/api/test_chip.cpp b/tests/api/test_chip.cpp
index 713cf464..339d1abc 100644
--- a/tests/api/test_chip.cpp
+++ b/tests/api/test_chip.cpp
@@ -21,13 +21,6 @@
 
 using namespace tt::umd;
 
-inline std::unique_ptr<tt_ClusterDescriptor> get_cluster_desc() {
-    // TODO: remove getting manually cluster descriptor from yaml.
-    std::string yaml_path = tt_ClusterDescriptor::get_cluster_descriptor_file_path();
-
-    return tt_ClusterDescriptor::create_from_yaml(yaml_path);
-}
-
 inline tt_cxy_pair get_tensix_chip_core_coord(const std::unique_ptr<Cluster> &umd_cluster) {
     chip_id_t any_mmio_chip = *umd_cluster->get_target_mmio_device_ids().begin();
     const tt_SocDescriptor& soc_desc = umd_cluster->get_soc_descriptor(any_mmio_chip);
@@ -36,60 +29,12 @@ inline tt_cxy_pair get_tensix_chip_core_coord(const std::unique_ptr &um
 }
 
 inline std::unique_ptr<Cluster> get_cluster() {
-
-    // TODO: This should not be needed. And could be part of the cluster descriptor probably.
-    // Note that cluster descriptor holds logical ids of chips.
-    // Which are different than physical PCI ids, which are /dev/tenstorrent/N ones.
-    // You have to see if physical PCIe is GS before constructing a cluster descriptor.
     std::vector<int> pci_device_ids = PCIDevice::enumerate_devices();
-    std::set<int> pci_device_ids_set (pci_device_ids.begin(), pci_device_ids.end());
-
-    tt::ARCH device_arch = tt::ARCH::GRAYSKULL;
-    if (!pci_device_ids.empty()) {
-        // TODO: This should be removed from the API, the driver itself should do it.
-        int physical_device_id = pci_device_ids[0];
-        // TODO: remove logical_device_id
-        PCIDevice pci_device (physical_device_id, 0);
-        device_arch = pci_device.get_arch();
-    }
-
     // TODO: Make this test work on a host system without any tt devices.
     if (pci_device_ids.empty()) {
         return nullptr;
     }
-
-    std::string yaml_path;
-    if (device_arch == tt::ARCH::GRAYSKULL) {
-        yaml_path = "";
-    } else if (device_arch == tt::ARCH::BLACKHOLE) {
-        yaml_path = test_utils::GetAbsPath("blackhole_1chip_cluster.yaml");
-    } else {
-        // TODO: remove getting manually cluster descriptor from yaml.
-        yaml_path = tt_ClusterDescriptor::get_cluster_descriptor_file_path();
-    }
-
-    // TODO: Remove the need to do this, allow default constructor to construct with all chips.
-    std::unique_ptr<tt_ClusterDescriptor> cluster_desc = get_cluster_desc();
-    std::unordered_set<chip_id_t> detected_num_chips = cluster_desc->get_all_chips();
-
-    // TODO: make this unordered vs set conversion not needed.
-    std::set<chip_id_t> detected_num_chips_set (detected_num_chips.begin(), detected_num_chips.end());
-
-
-    // TODO: This would be incorporated inside SocDescriptor.
-    std::string soc_path;
-    if (device_arch == tt::ARCH::GRAYSKULL) {
-        soc_path = test_utils::GetAbsPath("tests/soc_descs/grayskull_10x12.yaml");
-    } else if (device_arch == tt::ARCH::WORMHOLE_B0) {
-        soc_path = test_utils::GetAbsPath("tests/soc_descs/wormhole_b0_8x10.yaml");
-    } else if (device_arch == tt::ARCH::BLACKHOLE) {
-        soc_path = test_utils::GetAbsPath("tests/soc_descs/blackhole_140_arch_no_eth.yaml");
-    } else {
-        throw std::runtime_error("Unsupported architecture");
-    }
-
-
-    // TODO: Don't pass each of these arguments.
-    return std::unique_ptr<Cluster>(new Cluster(soc_path, tt_ClusterDescriptor::get_cluster_descriptor_file_path(), detected_num_chips_set));
+    return std::unique_ptr<Cluster>(new Cluster());
 }
 
 // TODO: Once default auto TLB setup is in, check it is setup properly.
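Illustrative note (not part of the patch): with the descriptor lookup moved into the driver, the test helper above reduces to the parameterless constructor. A hedged sketch of the same pattern in standalone code, with include paths assumed:

```cpp
// Sketch only: open every detected chip via the new parameterless Cluster constructor.
#include <memory>

#include "device/cluster.h"            // include paths assumed
#include "device/pcie/pci_device.hpp"

std::unique_ptr<tt::umd::Cluster> open_cluster_or_null() {
    // Bail out gracefully on hosts without Tenstorrent devices, as the tests do.
    if (PCIDevice::enumerate_devices().empty()) {
        return nullptr;
    }
    // The constructor detects the arch, picks default SOC/cluster descriptors,
    // and targets all chips found in the cluster descriptor.
    return std::make_unique<tt::umd::Cluster>();
}
```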
diff --git a/tests/api/test_cluster.cpp b/tests/api/test_cluster.cpp
index fc68f62a..339d628f 100644
--- a/tests/api/test_cluster.cpp
+++ b/tests/api/test_cluster.cpp
@@ -30,68 +30,14 @@ using namespace tt::umd;
 // N150. N300
 // Galaxy
 
-// TODO: This function should not exist, the API itself should be simple enough.
-inline std::unique_ptr<tt_ClusterDescriptor> get_cluster_desc() {
-    // TODO: remove getting manually cluster descriptor from yaml.
-    std::string yaml_path = tt_ClusterDescriptor::get_cluster_descriptor_file_path();
-
-    return tt_ClusterDescriptor::create_from_yaml(yaml_path);
-}
-
-// TODO: This function should not exist, the API itself should be simple enough.
 inline std::unique_ptr<Cluster> get_cluster() {
-    // TODO: This should not be needed. And could be part of the cluster descriptor probably.
-    // Note that cluster descriptor holds logical ids of chips.
-    // Which are different than physical PCI ids, which are /dev/tenstorrent/N ones.
-    // You have to see if physical PCIe is GS before constructing a cluster descriptor.
     std::vector<int> pci_device_ids = PCIDevice::enumerate_devices();
-    std::set<int> pci_device_ids_set(pci_device_ids.begin(), pci_device_ids.end());
-
-    tt::ARCH device_arch = tt::ARCH::GRAYSKULL;
-    if (!pci_device_ids.empty()) {
-        // TODO: This should be removed from the API, the driver itself should do it.
-        int physical_device_id = pci_device_ids[0];
-        // TODO: remove logical_device_id
-        PCIDevice pci_device(physical_device_id, 0);
-        device_arch = pci_device.get_arch();
-    }
-
     // TODO: Make this test work on a host system without any tt devices.
     if (pci_device_ids.empty()) {
         return nullptr;
     }
-
-    std::string yaml_path;
-    if (device_arch == tt::ARCH::GRAYSKULL) {
-        yaml_path = "";
-    } else if (device_arch == tt::ARCH::BLACKHOLE) {
-        yaml_path = test_utils::GetAbsPath("blackhole_1chip_cluster.yaml");
-    } else {
-        // TODO: remove getting manually cluster descriptor from yaml.
-        yaml_path = tt_ClusterDescriptor::get_cluster_descriptor_file_path();
-    }
-
-    // TODO: Remove the need to do this, allow default constructor to construct with all chips.
-    std::unique_ptr<tt_ClusterDescriptor> cluster_desc = get_cluster_desc();
-    std::unordered_set<chip_id_t> detected_num_chips = cluster_desc->get_all_chips();
-
-    // TODO: make this unordered vs set conversion not needed.
-    std::set<chip_id_t> detected_num_chips_set(detected_num_chips.begin(), detected_num_chips.end());
-
-    // TODO: This would be incorporated inside SocDescriptor.
-    std::string soc_path;
-    if (device_arch == tt::ARCH::GRAYSKULL) {
-        soc_path = test_utils::GetAbsPath("tests/soc_descs/grayskull_10x12.yaml");
-    } else if (device_arch == tt::ARCH::WORMHOLE_B0) {
-        soc_path = test_utils::GetAbsPath("tests/soc_descs/wormhole_b0_8x10.yaml");
-    } else if (device_arch == tt::ARCH::BLACKHOLE) {
-        soc_path = test_utils::GetAbsPath("tests/soc_descs/blackhole_140_arch_no_eth.yaml");
-    } else {
-        throw std::runtime_error("Unsupported architecture");
-    }
-
-    // TODO: Don't pass each of these arguments.
     return std::unique_ptr<Cluster>(
-        new Cluster(soc_path, tt_ClusterDescriptor::get_cluster_descriptor_file_path(), detected_num_chips_set));
+        new Cluster());
 }
 
 // TODO: Should not be wormhole specific.
@@ -115,9 +61,10 @@ void setup_wormhole_remote(Cluster* umd_cluster) {
 
 TEST(ApiClusterTest, OpenAllChips) { std::unique_ptr<Cluster> umd_cluster = get_cluster(); }
 
 TEST(ApiClusterTest, SimpleIOAllChips) {
-    std::unique_ptr<tt_ClusterDescriptor> cluster_desc = get_cluster_desc();
     std::unique_ptr<Cluster> umd_cluster = get_cluster();
 
+    const tt_ClusterDescriptor* cluster_desc = umd_cluster->get_cluster_description();
+
     if (umd_cluster == nullptr || umd_cluster->get_all_chips_in_cluster().empty()) {
         GTEST_SKIP() << "No chips present on the system. Skipping test.";
     }
@@ -172,9 +119,10 @@ TEST(ApiClusterTest, SimpleIOAllChips) {
 }
 
 TEST(ApiClusterTest, RemoteFlush) {
-    std::unique_ptr<tt_ClusterDescriptor> cluster_desc = get_cluster_desc();
     std::unique_ptr<Cluster> umd_cluster = get_cluster();
 
+    const tt_ClusterDescriptor* cluster_desc = umd_cluster->get_cluster_description();
+
     if (umd_cluster == nullptr || umd_cluster->get_all_chips_in_cluster().empty()) {
         GTEST_SKIP() << "No chips present on the system. Skipping test.";
     }
@@ -229,3 +177,61 @@ TEST(ApiClusterTest, RemoteFlush) {
     std::cout << "Testing whole cluster wait for remote chip flush again, should be no-op." << std::endl;
     umd_cluster->wait_for_non_mmio_flush();
 }
+
+TEST(ApiClusterTest, SimpleIOSpecificChips) {
+    std::unique_ptr<Cluster> umd_cluster = std::make_unique<Cluster>(0);
+
+    const tt_ClusterDescriptor* cluster_desc = umd_cluster->get_cluster_description();
+
+    if (umd_cluster == nullptr || umd_cluster->get_all_chips_in_cluster().empty()) {
+        GTEST_SKIP() << "No chips present on the system. Skipping test.";
+    }
+
+    // Initialize random data.
+    size_t data_size = 1024;
+    std::vector<uint8_t> data(data_size, 0);
+    for (int i = 0; i < data_size; i++) {
+        data[i] = i % 256;
+    }
+
+    // TODO: this should be part of constructor if it is mandatory.
+    setup_wormhole_remote(umd_cluster.get());
+
+    for (auto chip_id : umd_cluster->get_all_chips_in_cluster()) {
+        const tt_SocDescriptor& soc_desc = umd_cluster->get_soc_descriptor(chip_id);
+
+        // TODO: figure out if core locations should contain chip_id
+        tt_xy_pair any_core = soc_desc.workers[0];
+        tt_cxy_pair any_core_global(chip_id, any_core);
+
+        if (cluster_desc->is_chip_remote(chip_id) && soc_desc.arch != tt::ARCH::WORMHOLE_B0) {
+            std::cout << "Skipping remote chip " << chip_id << " because it is not a wormhole_b0 chip." << std::endl;
+            continue;
+        }
+
+        std::cout << "Writing to chip " << chip_id << " core " << any_core.str() << std::endl;
+
+        umd_cluster->write_to_device(data.data(), data_size, any_core_global, 0, "LARGE_WRITE_TLB");
+    }
+
+    // Now read back the data.
+    for (auto chip_id : umd_cluster->get_all_chips_in_cluster()) {
+        const tt_SocDescriptor& soc_desc = umd_cluster->get_soc_descriptor(chip_id);
+
+        // TODO: figure out if core locations should contain chip_id
+        tt_xy_pair any_core = soc_desc.workers[0];
+        tt_cxy_pair any_core_global(chip_id, any_core);
+
+        if (cluster_desc->is_chip_remote(chip_id) && soc_desc.arch != tt::ARCH::WORMHOLE_B0) {
+            std::cout << "Skipping remote chip " << chip_id << " because it is not a wormhole_b0 chip." << std::endl;
+            continue;
+        }
+
+        std::cout << "Reading from chip " << chip_id << " core " << any_core.str() << std::endl;
+
+        std::vector<uint8_t> readback_data(data_size, 0);
+        umd_cluster->read_from_device(readback_data.data(), any_core_global, 0, data_size, "LARGE_READ_TLB");
+
+        ASSERT_EQ(data, readback_data);
+    }
+}
diff --git a/tests/blackhole/test_bh_common.h b/tests/blackhole/test_bh_common.h
index 57fdf25c..5d115e31 100644
--- a/tests/blackhole/test_bh_common.h
+++ b/tests/blackhole/test_bh_common.h
@@ -54,7 +54,7 @@ class BlackholeTestFixture : public ::testing::Test {
         std::iota(devices.begin(), devices.end(), 0);
         std::set<chip_id_t> target_devices = {devices.begin(), devices.end()};
         uint32_t num_host_mem_ch_per_mmio_device = 1;
-        device = std::make_unique<Cluster>(test_utils::GetAbsPath(SOC_DESC_PATH), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+        device = std::make_unique<Cluster>(num_host_mem_ch_per_mmio_device, false, true, true);
         assert(device != nullptr);
         assert(device->get_cluster_description()->get_number_of_chips() == get_detected_num_chips());
diff --git a/tests/blackhole/test_silicon_driver_bh.cpp b/tests/blackhole/test_silicon_driver_bh.cpp
index abaff80a..b2b7bde1 100644
--- a/tests/blackhole/test_silicon_driver_bh.cpp
+++ b/tests/blackhole/test_silicon_driver_bh.cpp
@@ -207,7 +207,7 @@ TEST(SiliconDriverBH, UnalignedStaticTLB_RW) {
 
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/blackhole_140_arch_no_eth.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true);
     set_params_for_remote_txn(device);
     auto mmio_devices = device.get_target_mmio_device_ids();
@@ -266,7 +266,7 @@ TEST(SiliconDriverBH, StaticTLB_RW) {
 
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/blackhole_140_arch_no_eth.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true);
     set_params_for_remote_txn(device);
     auto mmio_devices = device.get_target_mmio_device_ids();
@@ -316,7 +316,7 @@ TEST(SiliconDriverBH, DynamicTLB_RW) {
     std::set<chip_id_t> target_devices = get_target_devices();
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/blackhole_140_arch_no_eth.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true);
 
     set_params_for_remote_txn(device);
 
@@ -380,7 +380,7 @@ TEST(SiliconDriverBH, MultiThreadedDevice) {
     std::set<chip_id_t> target_devices = get_target_devices();
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/blackhole_140_arch_no_eth.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true);
 
     set_params_for_remote_txn(device);
 
@@ -439,7 +439,7 @@ TEST(SiliconDriverBH, MultiThreadedMemBar) {
     uint32_t base_addr = l1_mem::address_map::DATA_BUFFER_SPACE_BASE;
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/blackhole_140_arch_no_eth.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true);
     set_params_for_remote_txn(device);
 
     for(int i = 0; i < target_devices.size(); i++) {
         // Iterate over devices and only setup static TLBs for functional worker cores
@@ -541,7 +541,7 @@ TEST(SiliconDriverBH, DISABLED_BroadcastWrite) { // Cannot broadcast to tensix/e
 
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/blackhole_140_arch_no_eth.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true);
     set_params_for_remote_txn(device);
     auto mmio_devices = device.get_target_mmio_device_ids();
@@ -598,7 +598,7 @@ TEST(SiliconDriverBH, DISABLED_VirtualCoordinateBroadcast) { // same problem as
 
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/blackhole_140_arch_no_eth.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true);
     set_params_for_remote_txn(device);
     auto mmio_devices = device.get_target_mmio_device_ids();
diff --git a/tests/grayskull/test_silicon_driver.cpp b/tests/grayskull/test_silicon_driver.cpp
index c8fca4bf..c61a3a2e 100644
--- a/tests/grayskull/test_silicon_driver.cpp
+++ b/tests/grayskull/test_silicon_driver.cpp
@@ -20,7 +20,7 @@ TEST(SiliconDriverGS, CreateDestroySequential) {
     uint32_t num_host_mem_ch_per_mmio_device = 1;
     tt_device_params default_params;
     for(int i = 0; i < 100; i++) {
-        Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/grayskull_10x12.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true);
+        Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true);
         device.start_device(default_params);
         device.deassert_risc_reset();
         device.close_device();
@@ -34,7 +34,7 @@ TEST(SiliconDriverGS, CreateMultipleInstance) {
     default_params.init_device = false;
     std::unordered_map<int, Cluster*> concurrent_devices = {};
     for(int i = 0; i < 100; i++) {
-        concurrent_devices.insert({i, new Cluster(test_utils::GetAbsPath("tests/soc_descs/grayskull_10x12.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true)});
+        concurrent_devices.insert({i, new Cluster(num_host_mem_ch_per_mmio_device, false, true)});
         concurrent_devices.at(i) -> start_device(default_params);
     }
 
@@ -48,7 +48,7 @@ TEST(SiliconDriverGS, Harvesting) {
     std::set<chip_id_t> target_devices = {0};
     std::unordered_map<chip_id_t, uint32_t> simulated_harvesting_masks = {{0, 6}, {1, 12}};
     uint32_t num_host_mem_ch_per_mmio_device = 1;
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/grayskull_10x12.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true, simulated_harvesting_masks);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true, simulated_harvesting_masks);
     auto sdesc_per_chip = device.get_virtual_soc_descriptors();
     ASSERT_EQ(device.using_harvested_soc_descriptors(), true) << "Expected Driver to have performed harvesting";
@@ -85,7 +85,7 @@ TEST(SiliconDriverGS, HarvestingRuntime) {
     std::set<chip_id_t> target_devices = {0};
     std::unordered_map<chip_id_t, uint32_t> simulated_harvesting_masks = {{0, 6}, {1, 12}};
     uint32_t num_host_mem_ch_per_mmio_device = 1;
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/grayskull_10x12.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true, simulated_harvesting_masks);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true, simulated_harvesting_masks);
 
     for(int i = 0; i < target_devices.size(); i++) {
         // Iterate over devices and only setup static TLBs for functional worker cores
@@ -148,7 +148,7 @@ TEST(SiliconDriverGS, StaticTLB_RW) {
     std::set<chip_id_t> target_devices = {0};
     uint32_t num_host_mem_ch_per_mmio_device = 1;
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/grayskull_10x12.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true);
     for(int i = 0; i < target_devices.size(); i++) {
         // Iterate over devices and only setup static TLBs for worker cores
         auto& sdesc = device.get_virtual_soc_descriptors().at(i);
@@ -196,7 +196,7 @@ TEST(SiliconDriverGS, DynamicTLB_RW) {
     std::set<chip_id_t> target_devices = {0};
     uint32_t num_host_mem_ch_per_mmio_device = 1;
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/grayskull_10x12.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true);
     device.set_fallback_tlb_ordering_mode("SMALL_READ_WRITE_TLB", TLB_DATA::Posted); // Explicitly test API to set fallback tlb ordering mode
     tt_device_params default_params;
     device.start_device(default_params);
@@ -238,7 +238,8 @@ TEST(SiliconDriverGS, MultiThreadedDevice) {
     std::set<chip_id_t> target_devices = {0};
     uint32_t num_host_mem_ch_per_mmio_device = 1;
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/grayskull_10x12.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true);
+
     tt_device_params default_params;
     device.start_device(default_params);
     device.deassert_risc_reset();
@@ -315,8 +316,8 @@ TEST(SiliconDriverGS, MultiThreadedMemBar) { // this tests takes ~5 mins to run
     uint32_t base_addr = l1_mem::address_map::DATA_BUFFER_SPACE_BASE;
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/grayskull_10x12.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true);
-
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true);
+
     for(int i = 0; i < target_devices.size(); i++) {
         // Iterate over devices and only setup static TLBs for functional worker cores
         auto& sdesc = device.get_virtual_soc_descriptors().at(i);
diff --git a/tests/wormhole/test_silicon_driver_wh.cpp b/tests/wormhole/test_silicon_driver_wh.cpp
index 0f8f9099..48834d48 100644
--- a/tests/wormhole/test_silicon_driver_wh.cpp
+++ b/tests/wormhole/test_silicon_driver_wh.cpp
@@ -92,7 +92,7 @@ TEST(SiliconDriverWH, Harvesting) {
     std::unordered_map<chip_id_t, uint32_t> simulated_harvesting_masks = {{0, 30}, {1, 60}};
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/wormhole_b0_8x10.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true, simulated_harvesting_masks);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true, simulated_harvesting_masks);
     auto sdesc_per_chip = device.get_virtual_soc_descriptors();
 
     ASSERT_EQ(device.using_harvested_soc_descriptors(), true) << "Expected Driver to have performed harvesting";
@@ -133,8 +133,8 @@ TEST(SiliconDriverWH, HarvestingRuntime) {
     std::unordered_map<chip_id_t, uint32_t> simulated_harvesting_masks = {{0, 30}, {1, 60}};
     uint32_t num_host_mem_ch_per_mmio_device = 1;
-
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/wormhole_b0_8x10.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true, simulated_harvesting_masks);
+
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true, simulated_harvesting_masks);
     set_params_for_remote_txn(device);
     auto mmio_devices = device.get_target_mmio_device_ids();
@@ -198,7 +198,7 @@ TEST(SiliconDriverWH, UnalignedStaticTLB_RW) {
     int num_devices = target_devices.size();
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/wormhole_b0_8x10.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true);
     set_params_for_remote_txn(device);
     auto mmio_devices = device.get_target_mmio_device_ids();
@@ -256,7 +256,7 @@ TEST(SiliconDriverWH, StaticTLB_RW) {
     std::set<chip_id_t> target_devices = get_target_devices();
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/wormhole_b0_8x10.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true);
     set_params_for_remote_txn(device);
     auto mmio_devices = device.get_target_mmio_device_ids();
@@ -305,7 +305,7 @@ TEST(SiliconDriverWH, DynamicTLB_RW) {
     std::set<chip_id_t> target_devices = get_target_devices();
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/wormhole_b0_8x10.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true);
 
     set_params_for_remote_txn(device);
 
@@ -343,7 +343,8 @@ TEST(SiliconDriverWH, MultiThreadedDevice) {
     std::set<chip_id_t> target_devices = get_target_devices();
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/wormhole_b0_8x10.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true);
+
     set_params_for_remote_txn(device);
 
     tt_device_params default_params;
@@ -401,7 +402,7 @@ TEST(SiliconDriverWH, MultiThreadedMemBar) {
     uint32_t base_addr = l1_mem::address_map::DATA_BUFFER_SPACE_BASE;
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/wormhole_b0_8x10.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true);
     set_params_for_remote_txn(device);
     auto mmio_devices = device.get_target_mmio_device_ids();
@@ -507,7 +508,8 @@ TEST(SiliconDriverWH, BroadcastWrite) {
     std::set<chip_id_t> target_devices = get_target_devices();
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/wormhole_b0_8x10.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true);
     set_params_for_remote_txn(device);
     auto mmio_devices = device.get_target_mmio_device_ids();
@@ -562,7 +564,8 @@ TEST(SiliconDriverWH, VirtualCoordinateBroadcast) {
     std::set<chip_id_t> target_devices = get_target_devices();
     uint32_t num_host_mem_ch_per_mmio_device = 1;
 
-    Cluster device = Cluster(test_utils::GetAbsPath("tests/soc_descs/wormhole_b0_8x10.yaml"), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+
+    Cluster device = Cluster(num_host_mem_ch_per_mmio_device, false, true, true);
     set_params_for_remote_txn(device);
     auto mmio_devices = device.get_target_mmio_device_ids();
@@ -644,13 +647,10 @@ TEST(SiliconDriverWH, VirtualCoordinateBroadcast) {
 TEST(SiliconDriverWH, SysmemTestWithPcie) {
     auto target_devices = get_target_devices();
 
-    Cluster cluster(test_utils::GetAbsPath("tests/soc_descs/wormhole_b0_8x10.yaml"),
-                    tt_ClusterDescriptor::get_cluster_descriptor_file_path(),
-                    target_devices,
-                    1,      // one "host memory channel", currently a 1G huge page
-                    false,  // skip driver allocs - no (don't skip)
-                    true,   // clean system resources - yes
-                    true);  // perform harvesting - yes
+    Cluster cluster(1,      // one "host memory channel", currently a 1G huge page
+                    false,  // skip driver allocs - no (don't skip)
+                    true,   // clean system resources - yes
+                    true);  // perform harvesting - yes
 
     set_params_for_remote_txn(cluster);
     cluster.start_device(tt_device_params{});  // no special parameters
diff --git a/tests/wormhole/test_wh_common.h b/tests/wormhole/test_wh_common.h
index 98cdf8ad..812f8b98 100644
--- a/tests/wormhole/test_wh_common.h
+++ b/tests/wormhole/test_wh_common.h
@@ -55,7 +55,7 @@ class WormholeTestFixture : public ::testing::Test {
         std::iota(devices.begin(), devices.end(), 0);
         std::set<chip_id_t> target_devices = {devices.begin(), devices.end()};
         uint32_t num_host_mem_ch_per_mmio_device = 1;
-        device = std::make_unique<Cluster>(test_utils::GetAbsPath(SOC_DESC_PATH), tt_ClusterDescriptor::get_cluster_descriptor_file_path(), target_devices, num_host_mem_ch_per_mmio_device, false, true, true);
+        device = std::make_unique<Cluster>(num_host_mem_ch_per_mmio_device, false, true, true);
         assert(device != nullptr);
         assert(device->get_cluster_description()->get_number_of_chips() == get_detected_num_chips());
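Illustrative note (not part of the patch): taken together, the three constructor forms now delegate their shared setup to `construct_cluster()`. A minimal sketch of each call site follows; the descriptor paths are placeholders, and in practice only one Cluster would normally be constructed per process.

```cpp
// Sketch only: the three ways to construct a Cluster after this change.
#include <set>

#include "device/cluster.h"  // include path assumed

void constructor_examples() {
    using namespace tt::umd;

    // 1. Auto-detect everything: arch, SOC/cluster descriptors, and all chips.
    Cluster all_chips;

    // 2. Auto-detect descriptors, but restrict the cluster to specific logical chip ids.
    Cluster chip0_only(std::set<chip_id_t>{0});

    // 3. Legacy form, still supported: explicit SOC/cluster descriptor paths plus targets.
    Cluster legacy("path/to/soc_descriptor.yaml", "path/to/cluster_descriptor.yaml", std::set<chip_id_t>{0});
}
```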