timm_xception.py
# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC
# SPDX-License-Identifier: Apache-2.0

# Xception

import os
import urllib.request

import pybuda
import requests
import timm
import torch
from PIL import Image
from pybuda._C.backend_api import BackendDevice
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform


def run_xception_timm(variant="xception"):
    """
    Variants = {
        'xception',
        'xception41',
        'xception65',
        'xception71'
    }
    """

    # Set PyBuda configuration parameters
    compiler_cfg = pybuda.config._get_global_compiler_config()  # load global compiler config object
    compiler_cfg.default_df_override = pybuda.DataFormat.Float16_b
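
    # Device-specific overrides: the detected backend (Wormhole B0 or Grayskull)
    # determines which balancer policy and environment-variable workarounds apply.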
    available_devices = pybuda.detect_available_devices()
    if variant == "xception":
        if available_devices[0] == BackendDevice.Wormhole_B0:
            compiler_cfg.balancer_policy = "CNN"
        elif available_devices[0] == BackendDevice.Grayskull:
            os.environ["PYBUDA_TEMP_DISABLE_MODEL_KB_PROLOGUE_BW"] = "1"

    if available_devices[0] == BackendDevice.Grayskull:
        compiler_cfg.balancer_policy = "Ribbon"
        os.environ["PYBUDA_RIBBON2"] = "1"
        os.environ["PYBUDA_FORCE_CONV_MULTI_OP_FRACTURE"] = "1"
        compiler_cfg.default_dram_parameters = False
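
    # Load the pretrained weights for the requested variant from timm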
    model_name = variant
    model = timm.create_model(model_name, pretrained=True)

    # preprocessing: build the input transform (resize, crop, normalization)
    # from the model's pretrained data config
    config = resolve_data_config({}, model=model)
    transform = create_transform(**config)
    url = "https://raw.githubusercontent.com/pytorch/hub/master/images/dog.jpg"
    img = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    tensor = transform(img).unsqueeze(0)  # transform and add batch dimension

    # Create PyBuda module from PyTorch model
    tt_model = pybuda.PyTorchModule(f"{variant}_timm_pt", model)

    # Run inference on Tenstorrent device
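    # run_inference returns an output queue; get() waits for the results of the
    # pushed inputs, and value() exposes the output as a torch tensor.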
    output_q = pybuda.run_inference(tt_model, inputs=([tensor]))
    output = output_q.get()[0].value()

    # postprocessing
    probabilities = torch.nn.functional.softmax(output[0], dim=0)

    # Get imagenet class mappings
    url = "https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt"
    image_classes = urllib.request.urlopen(url)
    categories = [s.decode("utf-8").strip() for s in image_classes.readlines()]

    # Print top categories per image
    top5_prob, top5_catid = torch.topk(probabilities, 5)
    for i in range(top5_prob.size(0)):
        print(categories[top5_catid[i]], top5_prob[i].item())


if __name__ == "__main__":
    run_xception_timm()
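    # Other variants from the docstring can be selected explicitly,
    # e.g. run_xception_timm(variant="xception41")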