From 1a305a6ff2a102766fc300fb5617e8650950abbc Mon Sep 17 00:00:00 2001
From: sabira-mcw
Date: Thu, 12 Sep 2024 12:13:22 +0000
Subject: [PATCH] #12558: TTNN implementation of MNIST model

---
 models/demos/mnist/README.md                  |  15 +++
 models/demos/mnist/demo/demo.py               |  79 +++++++++++++
 models/demos/mnist/tests/test_perf_mnist.py   | 111 ++++++++++++++++++
 models/demos/mnist/tt/tt_functional_mnist.py  |  57 +++++++++
 models/experimental/mnist/reference/mnist.py  |   2 +-
 tests/scripts/run_performance.sh              |   5 +
 .../single_card/run_single_card_demo_tests.sh |   3 +
 .../integration_tests/mnist/test_mnist.py     |  44 +++++++
 8 files changed, 315 insertions(+), 1 deletion(-)
 create mode 100644 models/demos/mnist/README.md
 create mode 100644 models/demos/mnist/demo/demo.py
 create mode 100644 models/demos/mnist/tests/test_perf_mnist.py
 create mode 100644 models/demos/mnist/tt/tt_functional_mnist.py
 create mode 100644 tests/ttnn/integration_tests/mnist/test_mnist.py

diff --git a/models/demos/mnist/README.md b/models/demos/mnist/README.md
new file mode 100644
index 000000000000..e677174c8eb0
--- /dev/null
+++ b/models/demos/mnist/README.md
@@ -0,0 +1,15 @@
+## Introduction
+The MNIST model uses only fully connected linear layers to classify handwritten digits from the MNIST dataset. Despite the absence of convolutional layers, the model processes each 28x28 pixel image by flattening it into a 784-element vector and passing it through three linear layers to predict the corresponding digit (0-9). This demonstrates that even simple architectures can be applied to image classification tasks.
+
+## How to Run
+
+To run the demo for digit classification using the MNIST model, follow these instructions:
+
+- Use the following command to run the MNIST demo:
+  ```
+  pytest models/demos/mnist/demo/demo.py::test_demo_dataset
+  ```
+
+## Details
+
+The reshape op runs on the host; the linear layers, ReLU activations, and softmax run on the device. (A plain PyTorch sketch of the forward pass follows the patch below.)
diff --git a/models/demos/mnist/demo/demo.py b/models/demos/mnist/demo/demo.py
new file mode 100644
index 000000000000..03bec1a6e90b
--- /dev/null
+++ b/models/demos/mnist/demo/demo.py
@@ -0,0 +1,79 @@
+# SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.
+
+# SPDX-License-Identifier: Apache-2.0
+
+import pytest
+import torch
+import ttnn
+
+from torchvision import transforms, datasets
+from loguru import logger
+
+from torch.utils.data import DataLoader
+from models.experimental.mnist.reference.mnist import MnistModel
+from models.demos.mnist.tt import tt_functional_mnist
+
+from ttnn.model_preprocessing import preprocess_model_parameters
+
+
+def run_demo_dataset(device, batch_size, iterations, model_location_generator):
+    # Data preprocessing/loading
+    transform = transforms.Compose([transforms.ToTensor()])
+    test_dataset = datasets.MNIST(root="./data", train=False, transform=transform, download=True)
+
+    # Load model
+    state_dict = torch.load(model_location_generator("mnist_model.pt", model_subdir="mnist"))
+    model = MnistModel(state_dict)
+    model = model.eval()
+
+    parameters = preprocess_model_parameters(
+        initialize_model=lambda: model,
+        convert_to_ttnn=lambda *_: True,
+    )
+    correct = 0
+    dataloader = iter(DataLoader(test_dataset, batch_size=batch_size))
+    for iters in range(iterations):
+        x, labels = next(dataloader)  # advance to a fresh batch each iteration
+        dataset_predictions = []
+        ttnn_predictions = []
+        dataset_ttnn_correct = 0
+        x = ttnn.from_torch(x, dtype=ttnn.bfloat16)
+        tt_output = tt_functional_mnist.mnist(device, batch_size, x, parameters)
+        # Bring the device output back to a (batch_size, 10) torch tensor
+        tt_output = ttnn.to_torch(tt_output).permute(1, 2, 0, 3).squeeze(0).squeeze(0)
+        predicted_probabilities = torch.nn.functional.softmax(tt_output, dim=1)
+        _, predicted_label = torch.max(predicted_probabilities, 1)
+        for i in range(batch_size):
+            dataset_predictions.append(labels[i])
+            ttnn_predictions.append(predicted_label[i])
+            logger.info(f"Iter: {iters} Sample {i}:")
+            logger.info(f"Expected Label: {dataset_predictions[i]}")
+            logger.info(f"Predicted Label: {ttnn_predictions[i]}")
+
+            if dataset_predictions[i] == ttnn_predictions[i]:
+                dataset_ttnn_correct += 1
+                correct += 1
+        dataset_ttnn_accuracy = dataset_ttnn_correct / batch_size
+        logger.info(
+            f"MNIST Inference Accuracy for iter {iters} of {batch_size} input samples: {dataset_ttnn_accuracy}"
+        )
+
+    accuracy = correct / (batch_size * iterations)
+    logger.info(f"MNIST Inference Accuracy for {batch_size}x{iterations} Samples: {accuracy}")
+
+
+@pytest.mark.parametrize("device_params", [{"l1_small_size": 32768}], indirect=True)
+@pytest.mark.parametrize("batch_size", [8])
+@pytest.mark.parametrize("iterations", [1])
+def test_demo_dataset(
+    device,
+    batch_size,
+    iterations,
+    model_location_generator,
+):
+    return run_demo_dataset(
+        device=device,
+        batch_size=batch_size,
+        iterations=iterations,
+        model_location_generator=model_location_generator,
+    )
diff --git a/models/demos/mnist/tests/test_perf_mnist.py b/models/demos/mnist/tests/test_perf_mnist.py
new file mode 100644
index 000000000000..fb337c2cfb08
--- /dev/null
+++ b/models/demos/mnist/tests/test_perf_mnist.py
@@ -0,0 +1,111 @@
+# SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.
+
+# SPDX-License-Identifier: Apache-2.0
+
+import ttnn
+import time
+import pytest
+import torch
+from loguru import logger
+from torchvision import transforms, datasets
+from models.perf.perf_utils import prep_perf_report
+from models.demos.mnist.tt import tt_functional_mnist
+from ttnn.model_preprocessing import preprocess_model_parameters
+from models.experimental.mnist.reference.mnist import MnistModel
+
+from models.perf.device_perf_utils import run_device_perf, check_device_perf, prep_device_perf_report
+
+transform = transforms.Compose([transforms.ToTensor()])
+test_dataset = datasets.MNIST(root="./data", train=False, transform=None, download=True)
+
+
+def get_expected_times(functional_mnist):
+    return {
+        tt_functional_mnist: (2, 0.004),
+    }[functional_mnist]
+
+
+@pytest.mark.models_performance_bare_metal
+@pytest.mark.models_performance_virtual_machine
+@pytest.mark.parametrize(
+    "batch_size",
+    [8],
+)
+@pytest.mark.parametrize(
+    "functional_mnist",
+    [tt_functional_mnist],
+)
+@pytest.mark.parametrize("device_params", [{"l1_small_size": 32768}], indirect=True)
+def test_performance_mnist(device, batch_size, functional_mnist, model_location_generator):
+    state_dict = torch.load(model_location_generator("mnist_model.pt", model_subdir="mnist"))
+    model = MnistModel(state_dict)
+    model = model.eval()
+    parameters = preprocess_model_parameters(
+        initialize_model=lambda: model,
+        convert_to_ttnn=lambda *_: True,
+    )
+    input_img, _ = test_dataset[1]
+
+    test_input = transform(input_img).unsqueeze(0).repeat(batch_size, 1, 1, 1)  # full batch, so samples/s reflects batch_size
+    test_input = ttnn.from_torch(test_input, dtype=ttnn.bfloat16)
+    durations = []
+    for _ in range(2):  # the first run includes compile time
+        start = time.time()
+
+        ttnn_output = tt_functional_mnist.mnist(
+            device=device,
+            x=test_input,
+            batch_size=batch_size,
+            parameters=parameters,
+        )
+        end = time.time()
+        durations.append(end - start)
+
+    inference_and_compile_time, *inference_times = durations
+    average_inference_time = sum(inference_times) / len(inference_times)
+    expected_compile_time, expected_inference_time = get_expected_times(functional_mnist)
+
+    prep_perf_report(
+        model_name="MNIST",
+        batch_size=batch_size,
+        inference_and_compile_time=inference_and_compile_time,
+        inference_time=average_inference_time,
+        expected_compile_time=expected_compile_time,
+        expected_inference_time=expected_inference_time,
+        comments="",
+        inference_time_cpu=0.0,
+    )
+
+    logger.info(f"Compile time: {inference_and_compile_time - average_inference_time}")
+    logger.info(f"Inference time: {average_inference_time}")
+    logger.info(f"Inference times: {inference_times}")
+    logger.info(f"Sample(s) per second: {1 / average_inference_time * batch_size}")
+
+
+@pytest.mark.parametrize(
+    "batch_size, expected_perf",
+    [
+        [8, 177450],
+    ],
+)
+@pytest.mark.models_device_performance_bare_metal
+def test_perf_device_bare_metal(batch_size, expected_perf):
+    subdir = "ttnn_mnist"
+    num_iterations = 1
+    margin = 0.03
+
+    command = "pytest tests/ttnn/integration_tests/mnist/test_mnist.py"
+    cols = ["DEVICE FW", "DEVICE KERNEL", "DEVICE BRISC KERNEL"]
+
+    inference_time_key = "AVG DEVICE KERNEL SAMPLES/S"
+    expected_perf_cols = {inference_time_key: expected_perf}
+
+    post_processed_results = run_device_perf(command, subdir, num_iterations, cols, batch_size)
+    expected_results = check_device_perf(post_processed_results, margin, expected_perf_cols)
+    prep_device_perf_report(
+        model_name=f"tt_functional_mnist{batch_size}",
+        batch_size=batch_size,
+        post_processed_results=post_processed_results,
+        expected_results=expected_results,
+        comments="",
+    )
diff --git a/models/demos/mnist/tt/tt_functional_mnist.py b/models/demos/mnist/tt/tt_functional_mnist.py
new file mode 100644
index 000000000000..403d119819d5
--- /dev/null
+++ b/models/demos/mnist/tt/tt_functional_mnist.py
@@ -0,0 +1,57 @@
+# SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.
+
+# SPDX-License-Identifier: Apache-2.0
+
+import ttnn
+
+
+def mnist(device, batch_size, x, parameters):
+    # Flatten each 28x28 image to a 784-wide vector; this reshape runs on the host
+    x = ttnn.reshape(x, (x.shape[0], 1, 1, 784))
+
+    weights_tensor = parameters.fc1.weight  # fc1
+    weights_tensor = ttnn.to_device(weights_tensor, device=device)
+    bias_tensor = parameters.fc1.bias
+    bias_tensor = ttnn.to_device(bias_tensor, device=device)
+    x = ttnn.to_device(x, device=device)
+    x = ttnn.to_layout(x, layout=ttnn.TILE_LAYOUT)  # tile layout for the device matmuls
+    x = ttnn.linear(
+        x,
+        weights_tensor,
+        bias=bias_tensor,
+        memory_config=ttnn.L1_MEMORY_CONFIG,
+    )
+    x = ttnn.relu(x)
+
+    weights_tensor = parameters.fc2.weight  # fc2
+    weights_tensor = ttnn.to_device(weights_tensor, device=device)
+
+    bias_tensor = parameters.fc2.bias
+    bias_tensor = ttnn.to_device(bias_tensor, device=device)
+
+    x = ttnn.linear(
+        x,
+        weights_tensor,
+        bias=bias_tensor,
+        memory_config=ttnn.L1_MEMORY_CONFIG,
+    )
+    x = ttnn.relu(x)
+
+    weights_tensor = parameters.fc3.weight  # fc3
+    weights_tensor = ttnn.to_device(weights_tensor, device=device)
+
+    bias_tensor = parameters.fc3.bias
+    bias_tensor = ttnn.to_device(bias_tensor, device=device)
+
+    x = ttnn.linear(
+        x,
+        weights_tensor,
+        bias=bias_tensor,
+        memory_config=ttnn.L1_MEMORY_CONFIG,
+    )
+    x = ttnn.relu(x)
+
+    # Softmax yields probabilities over the 10 digit classes
+    x = ttnn.softmax(x)
+
+    return x
diff --git a/models/experimental/mnist/reference/mnist.py b/models/experimental/mnist/reference/mnist.py
index 46a1213de968..b9cfb7365f4f 100644
--- a/models/experimental/mnist/reference/mnist.py
+++ b/models/experimental/mnist/reference/mnist.py
@@ -16,7 +16,7 @@ def __init__(self, state_dict):
         self.load_state_dict(state_dict)
 
     def forward(self, x):
-        x = x.view(-1)
+        x = x.view(x.shape[0], -1)
 
         x = self.fc1(x)
         x = torch.nn.functional.relu(x)
diff --git a/tests/scripts/run_performance.sh b/tests/scripts/run_performance.sh
index dbe5becdaca3..d449085f76bc 100755
--- a/tests/scripts/run_performance.sh
+++ b/tests/scripts/run_performance.sh
@@ -29,6 +29,8 @@ run_perf_models_other() {
 
     env pytest -n auto models/demos/metal_BERT_large_11/tests -m $test_marker
 
+    env pytest -n auto models/demos/mnist/tests/test_perf_mnist.py -m $test_marker
+
     ## Merge all the generated reports
     env python models/perf/merge_perf_results.py
 }
@@ -74,6 +76,9 @@ run_device_perf_models() {
 
     env pytest models/demos/distilbert/tests -m $test_marker
 
+    env pytest models/demos/mnist/tests -m $test_marker
+
+
     if [ "$tt_arch" == "grayskull" ]; then
         #TODO(MO): Until #6560 is fixed, GS device profiler test are grouped with
         #Model Device perf regression tests to make sure thy run on no-soft-reset BMs
diff --git a/tests/scripts/single_card/run_single_card_demo_tests.sh b/tests/scripts/single_card/run_single_card_demo_tests.sh
index c10b13dc5402..85150e532157 100755
--- a/tests/scripts/single_card/run_single_card_demo_tests.sh
+++ b/tests/scripts/single_card/run_single_card_demo_tests.sh
@@ -22,6 +22,9 @@ run_common_func_tests() {
   # Distilbert
   pytest --disable-warnings models/demos/distilbert/demo/demo.py --timeout 600; fail+=$?
 
+  # MNIST
+  pytest --disable-warnings models/demos/mnist/demo/demo.py --timeout 600; fail+=$?
+
   return $fail
 }
diff --git a/tests/ttnn/integration_tests/mnist/test_mnist.py b/tests/ttnn/integration_tests/mnist/test_mnist.py
new file mode 100644
index 000000000000..45db0e27a42f
--- /dev/null
+++ b/tests/ttnn/integration_tests/mnist/test_mnist.py
@@ -0,0 +1,44 @@
+# SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.
+
+# SPDX-License-Identifier: Apache-2.0
+
+import torch
+import ttnn
+import pytest
+from tests.ttnn.utils_for_testing import assert_with_pcc
+from ttnn.model_preprocessing import preprocess_model_parameters
+from models.experimental.mnist.reference.mnist import MnistModel
+from models.demos.mnist.tt import tt_functional_mnist
+from torch.utils.data import DataLoader
+from torchvision import transforms, datasets
+
+
+@pytest.mark.parametrize("device_params", [{"l1_small_size": 32768}], indirect=True)
+@pytest.mark.parametrize(
+    "batch_size",
+    [1],
+)
+def test_mnist(reset_seeds, device, batch_size, model_location_generator):
+    state_dict = torch.load(model_location_generator("mnist_model.pt", model_subdir="mnist"))
+    model = MnistModel(state_dict)
+    model = model.eval()
+    transform = transforms.Compose([transforms.ToTensor()])
+    test_dataset = datasets.MNIST(root="./data", train=False, transform=transform, download=True)
+    dataloader = DataLoader(test_dataset, batch_size=batch_size)
+
+    # Take one real MNIST batch for the PCC check
+    x, labels = next(iter(dataloader))
+
+    torch_output = model(x)
+
+    parameters = preprocess_model_parameters(
+        initialize_model=lambda: model,
+        convert_to_ttnn=lambda *_: True,
+    )
+    x = ttnn.from_torch(x, dtype=ttnn.bfloat16)
+
+    tt_output = tt_functional_mnist.mnist(device, batch_size, x, parameters)
+
+    tt_output = ttnn.to_torch(tt_output).permute(1, 2, 0, 3).squeeze(0).squeeze(0)
+
+    assert_with_pcc(torch_output, tt_output, 0.99)
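For reference (as noted in the README's Details section), the computation that `tt_functional_mnist.mnist` builds out of TTNN ops corresponds to the plain PyTorch sketch below. The `fc1`/`fc2`/`fc3` arguments stand in for the `torch.nn.Linear` modules that `MnistModel` loads from `mnist_model.pt`; their hidden widths are not stated in this patch, so only the 784-wide flattened input and the 10 output classes are taken from the code above.

```
import torch

def mnist_forward(x, fc1, fc2, fc3):
    # x: (batch, 1, 28, 28) MNIST images; fc1/fc2/fc3: the model's torch.nn.Linear layers
    x = x.view(x.shape[0], -1)            # flatten to (batch, 784), like ttnn.reshape on the host
    x = torch.nn.functional.relu(fc1(x))  # each ttnn.linear + ttnn.relu pair
    x = torch.nn.functional.relu(fc2(x))
    x = torch.nn.functional.relu(fc3(x))  # the TTNN graph applies relu after fc3 as well
    return torch.softmax(x, dim=-1)       # (batch, 10) class probabilities, like ttnn.softmax
```

Whether the reference `MnistModel` also applies ReLU after `fc3` is not visible in this diff; the sketch mirrors the TTNN graph as written in `tt_functional_mnist.py`.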