Skip to content

Commit

Permalink
Add 3 more extension checks
Browse files Browse the repository at this point in the history
  • Loading branch information
LiyangLingIntel committed Jun 28, 2024
1 parent 9060485 commit dcd81cd
Show file tree
Hide file tree
Showing 4 changed files with 22 additions and 3 deletions.
3 changes: 3 additions & 0 deletions csrc/gpu/aten/core/DeviceInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,9 @@ struct DeviceInfo {
std::vector<size_t> sub_group_sizes;
bool support_fp64;
bool support_cl_bf16_conversion;
bool support_cl_sg_matmul_acc;
bool support_cl_sg_matmul_acc_tf32;
bool support_cl_sg_2d_block_io;
};

} // namespace dpcpp
Expand Down
10 changes: 8 additions & 2 deletions csrc/gpu/runtime/Device.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -322,8 +322,11 @@ static void initDeviceProperty(DeviceId device_id) {
: 8;
device_prop.support_atomic64 = device.has(dpcpp_dev_aspect_atomic64);
device_prop.support_fp64 = device.has(dpcpp_dev_aspect_fp64);
sycl::ext::oneapi::experimental::cl_version version{20, 20, 20};
sycl::ext::oneapi::experimental::cl_version version;
device_prop.support_cl_bf16_conversion = device.ext_oneapi_supports_cl_extension("cl_intel_bfloat16_conversions", &version);
device_prop.support_cl_sg_matmul_acc = device.ext_oneapi_supports_cl_extension("cl_intel_subgroup_matrix_multiply_accumulate", &version);
device_prop.support_cl_sg_matmul_acc_tf32 = device.ext_oneapi_supports_cl_extension("cl_intel_subgroup_matrix_multiply_accumulate_tensor_float32", &version);
device_prop.support_cl_sg_2d_block_io = device.ext_oneapi_supports_cl_extension("cl_intel_subgroup_2d_block_io", &version);

device_properties[device_id] = device_prop;

Expand Down Expand Up @@ -358,9 +361,12 @@ static void initDeviceProperty(DeviceId device_id) {
dev_info.max_num_sub_groups = device_prop.max_num_subgroup;
dev_info.sub_group_sizes = device_prop.subgroup_sizes;
dev_info.support_fp64 = device_prop.support_fp64;
dev_info.support_cl_bf16_conversion = device_prop.support_cl_bf16_conversion;
dev_info.support_cl_sg_matmul_acc = device_prop.support_cl_sg_matmul_acc;
dev_info.support_cl_sg_matmul_acc_tf32 = device_prop.support_cl_sg_matmul_acc_tf32;
dev_info.support_cl_sg_2d_block_io = device_prop.support_cl_sg_2d_block_io;
#if (defined(__INTEL_LLVM_COMPILER) && __INTEL_LLVM_COMPILER >= 20240100)
dev_info.device_arch = static_cast<uint64_t>(device_prop.device_arch);
dev_info.support_cl_bf16_conversion = device_prop.support_cl_bf16_conversion;
#else
dev_info.device_arch = (uint64_t)0;
#endif
Expand Down
3 changes: 3 additions & 0 deletions csrc/gpu/runtime/DeviceProp.h
Original file line number Diff line number Diff line change
Expand Up @@ -144,6 +144,9 @@ struct DeviceProp {
bool support_fp64;
bool support_atomic64;
bool support_cl_bf16_conversion;
bool support_cl_sg_matmul_acc;
bool support_cl_sg_matmul_acc_tf32;
bool support_cl_sg_2d_block_io;
};

} // namespace dpcpp
Expand Down
9 changes: 8 additions & 1 deletion intel_extension_for_pytorch/csrc/xpu/Module.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -578,6 +578,9 @@ static void register_xpu_device_info(PyObject* module) {
.def_readonly("sub_group_sizes", &DeviceInfo::sub_group_sizes)
.def_readonly("has_fp64", &DeviceInfo::support_fp64)
.def_readonly("support_cl_bf16_conversion", &DeviceInfo::support_cl_bf16_conversion)
.def_readonly("support_cl_sg_matmul_acc", &DeviceInfo::support_cl_sg_matmul_acc)
.def_readonly("support_cl_sg_matmul_acc_tf32", &DeviceInfo::support_cl_sg_matmul_acc_tf32)
.def_readonly("support_cl_sg_2d_block_io", &DeviceInfo::support_cl_sg_2d_block_io)
.def_readonly("device_arch", &DeviceInfo::device_arch)
.def_property_readonly(
"dev_type", [](const DeviceInfo& info) { return get_dev_type(info); })
Expand All @@ -591,7 +594,11 @@ static void register_xpu_device_info(PyObject* module) {
<< "MB, max_compute_units=" << info.max_compute_units
<< ", gpu_eu_count=" << info.gpu_eu_count
<< ", device_arch=" << info.device_arch
<< ", support_cl_bf16_conversion=" << info.support_cl_bf16_conversion << ")";
<< ", support_cl_bf16_conversion=" << info.support_cl_bf16_conversion
<< ", support_cl_sg_matmul_acc=" << info.support_cl_sg_matmul_acc
<< ", support_cl_sg_matmul_acc_tf32=" << info.support_cl_sg_matmul_acc_tf32
<< ", support_cl_sg_2d_block_io=" << info.support_cl_sg_2d_block_io
<< ")";
return stream.str();
});
}
Expand Down

0 comments on commit dcd81cd

Please sign in to comment.