setup.py
import io
import os
import glob
import subprocess

from setuptools import setup, find_packages

import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension

cwd = os.path.dirname(os.path.abspath(__file__))
version = '1.2.2'
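# For development builds (when RELEASE is not set in the environment), a date
# stamp is appended below, giving a version such as 1.2.2b20240115.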
try:
    if not os.getenv('RELEASE'):
        from datetime import date
        today = date.today()
        day = today.strftime("b%Y%m%d")
        version += day
except Exception:
    pass
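# Write the resolved version string into encoding/version.py so the installed
# package can report __version__ at runtime.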
def create_version_file():
    global version, cwd
    print('-- Building version ' + version)
    version_path = os.path.join(cwd, 'encoding', 'version.py')
    with open(version_path, 'w') as f:
        f.write('"""This is encoding version file."""\n')
        f.write("__version__ = '{}'\n".format(version))
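# Runtime dependencies installed alongside the package.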
requirements = [
    'numpy',
    'tqdm',
    'nose',
    'portalocker',
    'torch>=1.4.0',
    'torchvision>=0.5.0',
    'Pillow',
    'scipy',
    'requests',
]
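# Collect the C++/CUDA sources under encoding/lib and describe them as
# setuptools extension modules. The CPU extension is always built; the CUDA
# extension is added only when a CUDA toolkit is found (CUDA_HOME is set).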
def get_extensions():
    this_dir = os.path.dirname(os.path.abspath(__file__))
    cpu_extensions_dir = os.path.join(this_dir, "encoding", "lib", "cpu")
    gpu_extensions_dir = os.path.join(this_dir, "encoding", "lib", "gpu")

    source_cpu = glob.glob(os.path.join(cpu_extensions_dir, "*.cpp"))
    source_cuda = glob.glob(os.path.join(gpu_extensions_dir, "*.cpp")) + \
        glob.glob(os.path.join(gpu_extensions_dir, "*.cu"))
    print('c++: ', source_cpu)
    print('cuda: ', source_cuda)

    sources = source_cpu
    extra_compile_args = {"cxx": []}
    include_dirs = [cpu_extensions_dir]

    ext_modules = [
        CppExtension(
            "encoding.cpu",
            source_cpu,
            include_dirs=include_dirs,
            extra_compile_args=extra_compile_args,
        )
    ]

    if CUDA_HOME is not None:
        define_macros = [("WITH_CUDA", None)]
        include_dirs += [gpu_extensions_dir]
        extra_compile_args["nvcc"] = [
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ]
        ext_modules.extend([
            CUDAExtension(
                "encoding.gpu",
                source_cuda,
                include_dirs=include_dirs,
                define_macros=define_macros,
                extra_compile_args=extra_compile_args,
            )
        ])

    return ext_modules
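# Entry point: generate encoding/version.py, then register the package, its
# bundled data files, and the native extensions with setuptools.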
if __name__ == '__main__':
    create_version_file()
    setup(
        name="torch-encoding",
        version=version,
        description="PyTorch Encoding Package",
        long_description=open('README.md').read(),
        long_description_content_type='text/markdown',
        license='MIT',
        install_requires=requirements,
        packages=find_packages(exclude=["tests", "experiments"]),
        package_data={'encoding': [
            'LICENSE',
            'lib/cpu/*.h',
            'lib/cpu/*.cpp',
            'lib/gpu/*.h',
            'lib/gpu/*.cpp',
            'lib/gpu/*.cu',
        ]},
        ext_modules=get_extensions(),
        cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
    )
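# A minimal usage sketch (assumes PyTorch is already installed and, for the
# GPU extension, that CUDA_HOME points at a matching CUDA toolkit):
#   python setup.py install            # build encoding.cpu / encoding.gpu and install
#   RELEASE=1 python setup.py install  # install without the date-stamped version suffix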