Made use of library handler safer

PabloAndresCQ committed Jun 4, 2024
1 parent 600568d commit 7fc94cc
Showing 2 changed files with 35 additions and 28 deletions.
13 changes: 11 additions & 2 deletions pytket/extensions/cutensornet/general.py
@@ -52,15 +52,24 @@ def __init__(self, device_id: Optional[int] = None):
         self.dev = dev
         self.device_id = dev.id
 
-        self.handle = cutn.create()
+        self._handle = cutn.create()
 
+    @property
+    def handle(self) -> Any:
+        if self._is_destroyed:
+            raise RuntimeError(
+                "The cuTensorNet library handle is out of scope.",
+                "See the documentation of CuTensorNetHandle.",
+            )
+        return self._handle
+
     def destroy(self) -> None:
         """Destroys the memory handle, releasing memory.
         Only call this method if you are initialising a ``CuTensorNetHandle`` outside
         a ``with CuTensorNetHandle() as libhandle`` statement.
         """
-        cutn.destroy(self.handle)
+        cutn.destroy(self._handle)
+        self._is_destroyed = True
 
     def __enter__(self) -> CuTensorNetHandle:
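
A minimal, self-contained sketch of the guard pattern this hunk introduces (a toy `GuardedHandle` class, not the library's code; note that the hunk does not show where `_is_destroyed` is first set to `False`, which is assumed here to happen in `__init__`):

```python
from typing import Any


class GuardedHandle:
    """Toy stand-in for the guarded-access pattern in CuTensorNetHandle."""

    def __init__(self) -> None:
        self._handle: Any = object()  # stands in for cutn.create()
        self._is_destroyed = False    # assumed to be initialised here

    @property
    def handle(self) -> Any:
        # Fail loudly instead of handing out a dangling pointer.
        if self._is_destroyed:
            raise RuntimeError("The library handle is out of scope.")
        return self._handle

    def destroy(self) -> None:
        self._handle = None  # stands in for cutn.destroy(...)
        self._is_destroyed = True


h = GuardedHandle()
assert h.handle is not None  # fine while the handle is alive
h.destroy()
try:
    h.handle
except RuntimeError as err:
    print(err)  # the guard fires on any access after destroy()
```
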
50 changes: 24 additions & 26 deletions pytket/extensions/cutensornet/general_state/tensor_network_state.py
@@ -47,8 +47,7 @@ def __init__(
         self._logger = set_logger("GeneralState", loglevel)
         self._circuit = circuit
         self._circuit.replace_implicit_wire_swaps()
-        self._handle = libhandle.handle
-        self._dev = libhandle.dev
+        self._lib = libhandle
 
         libhandle.print_device_properties(self._logger)
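
Storing the `CuTensorNetHandle` wrapper itself, rather than snapshots of its `.handle` and `.dev`, is what makes the new guard effective: a raw copy taken in `__init__` passes the check only once, while `self._lib.handle` re-runs it on every call into cuTensorNet. A self-contained sketch of the difference, reusing the hypothetical `GuardedHandle` shape from above:

```python
from typing import Any


class GuardedHandle:
    def __init__(self) -> None:
        self._handle: Any = object()
        self._is_destroyed = False

    @property
    def handle(self) -> Any:
        if self._is_destroyed:
            raise RuntimeError("The library handle is out of scope.")
        return self._handle

    def destroy(self) -> None:
        self._is_destroyed = True


class UsesRawCopy:  # the old pattern: self._handle = libhandle.handle
    def __init__(self, lib: GuardedHandle) -> None:
        self._handle = lib.handle  # snapshot: the guard runs once, here


class UsesWrapper:  # the new pattern: self._lib = libhandle
    def __init__(self, lib: GuardedHandle) -> None:
        self._lib = lib  # every self._lib.handle access is re-checked


lib = GuardedHandle()
a, b = UsesRawCopy(lib), UsesWrapper(lib)
lib.destroy()
print(a._handle)  # stale snapshot: no error, a dangling handle in the real library
try:
    b._lib.handle
except RuntimeError:
    print("guard fired")  # wrapper access fails loudly, as intended
```
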

@@ -64,7 +63,7 @@ def __init__(
         self._work_desc = None
 
         self._state = cutn.create_state(
-            self._handle, cutn.StatePurity.PURE, num_qubits, qubits_dims, data_type
+            self._lib.handle, cutn.StatePurity.PURE, num_qubits, qubits_dims, data_type
         )
         self._gate_tensors = []
         for com in circuit.get_commands():
@@ -75,7 +74,7 @@ def __init__(
             )
 
             cutn.state_apply_tensor_operator(
-                handle=self._handle,
+                handle=self._lib.handle,
                 tensor_network_state=self._state,
                 num_state_modes=com.op.n_qubits,
                 state_modes=gate_qubit_indices,
@@ -112,7 +111,7 @@ def configure(self, attributes: Optional[dict] = None) -> GeneralState:
             attr_dtype = cutn.state_get_attribute_dtype(attr)
             attr_arr = np.asarray(val, dtype=attr_dtype)
             cutn.state_configure(
-                self._handle,
+                self._lib.handle,
                 self._state,
                 attr,
                 attr_arr.ctypes.data,
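
All of the `configure` hunks use the same calling convention: the Python value is packed into a zero-dimensional NumPy array of the dtype the attribute reports, and the C API receives the buffer's address (the `attr_arr.ctypes.data` visible above; the companion size argument is cut off by the truncated hunk). A plain-NumPy illustration of that convention:

```python
import numpy as np

val = 4  # e.g. an integer-valued configuration attribute
attr_arr = np.asarray(val, dtype=np.int32)  # 0-d array in the attribute's dtype
address = attr_arr.ctypes.data    # integer address of the underlying buffer
nbytes = attr_arr.dtype.itemsize  # byte size of the value at that address
print(hex(address), nbytes)       # pointer-plus-size pair handed to the C library
```
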
@@ -138,20 +137,20 @@ def prepare(self, scratch_fraction: float = 0.5) -> GeneralState:
         self._stream = (
             cp.cuda.Stream()
         )  # In current cuTN release it is unused (could be 0x0)
-        free_mem = self._dev.mem_info[0]
+        free_mem = self._lib.dev.mem_info[0]
         scratch_size = int(scratch_fraction * free_mem)
         self._scratch_space = cp.cuda.alloc(scratch_size)
         self._logger.debug(f"Allocated {scratch_size} bytes of scratch memory on GPU")
-        self._work_desc = cutn.create_workspace_descriptor(self._handle)
+        self._work_desc = cutn.create_workspace_descriptor(self._lib.handle)
         cutn.state_prepare(
-            self._handle,
+            self._lib.handle,
             self._state,
             scratch_size,
             self._work_desc,
             self._stream.ptr,  # type: ignore
         )
         workspace_size_d = cutn.workspace_get_memory_size(
-            self._handle,
+            self._lib.handle,
             self._work_desc,
             cutn.WorksizePref.RECOMMENDED,
             cutn.Memspace.DEVICE,
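
`prepare` sizes its scratch buffer as a fixed fraction of whatever memory is currently free on the GPU. The same arithmetic in plain CuPy (device 0 assumed):

```python
import cupy as cp

scratch_fraction = 0.5  # the method's default
free_mem, total_mem = cp.cuda.Device(0).mem_info  # (free, total) in bytes
scratch_size = int(scratch_fraction * free_mem)
scratch_space = cp.cuda.alloc(scratch_size)  # MemoryPointer; released when garbage-collected
print(f"Allocated {scratch_size} of {total_mem} bytes as scratch")
```
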
@@ -160,7 +159,7 @@ def prepare(self, scratch_fraction: float = 0.5) -> GeneralState:
 
         if workspace_size_d <= scratch_size:
             cutn.workspace_set_memory(
-                self._handle,
+                self._lib.handle,
                 self._work_desc,
                 cutn.Memspace.DEVICE,
                 cutn.WorkspaceKind.SCRATCH,
@@ -175,7 +174,7 @@ def prepare(self, scratch_fraction: float = 0.5) -> GeneralState:
         else:
             self.destroy()
             raise MemoryError(
-                f"Insufficient workspace size on the GPU device {self._dev.id}"
+                f"Insufficient workspace size on the GPU device {self._lib.dev.id}"
             )
 
     def compute(self, on_host: bool = True) -> Union[cp.ndarray, np.ndarray]:
@@ -193,7 +192,7 @@ def compute(self, on_host: bool = True) -> Union[cp.ndarray, np.ndarray]:
             (2,) * self._circuit.n_qubits, dtype="complex128", order="F"
         )
         cutn.state_compute(
-            self._handle,
+            self._lib.handle,
             self._state,
             self._work_desc,
             (state_vector.data.ptr,),
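
Taken together, these hunks leave the public `GeneralState` workflow unchanged while routing every handle access through the guard. A hypothetical end-to-end use (import paths and constructor argument order assumed from the file layout, not confirmed by this diff):

```python
from pytket import Circuit
from pytket.extensions.cutensornet import CuTensorNetHandle
from pytket.extensions.cutensornet.general_state import GeneralState

circ = Circuit(2).H(0).CX(0, 1)  # Bell-state circuit

with CuTensorNetHandle() as libhandle:
    state = GeneralState(circ, libhandle)
    state.configure().prepare()       # both return self, so they chain
    sv = state.compute(on_host=True)  # statevector as a NumPy array

# Outside the with-block the handle is destroyed: any later call that touches
# state._lib.handle now raises RuntimeError instead of dereferencing a dead pointer.
```
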
@@ -241,11 +240,11 @@ def __init__(
             "I": _formatted_tensor(np.asarray([[1, 0], [0, 1]]), 1),
         }
         self._logger = set_logger("GeneralOperator", loglevel)
-        self._handle = libhandle.handle
+        self._lib = libhandle
         qubits_dims = (2,) * num_qubits
         data_type = cq.cudaDataType.CUDA_C_64F
         self._operator = cutn.create_network_operator(
-            self._handle, num_qubits, qubits_dims, data_type
+            self._lib.handle, num_qubits, qubits_dims, data_type
         )
         self._logger.debug("Adding operator terms:")
         for pauli_string, coeff in operator._dict.items():
@@ -262,7 +261,7 @@ def __init__(
             )
 
             cutn.network_operator_append_product(
-                handle=self._handle,
+                handle=self._lib.handle,
                 tensor_network_operator=self._operator,
                 coefficient=numeric_coeff,
                 num_tensors=num_pauli,
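
`GeneralOperator` appends one product term per Pauli string: a complex coefficient plus a list of single-qubit tensor factors. A dense-NumPy sketch of what a single term means mathematically (the library keeps the factors separate rather than forming the Kronecker product):

```python
import numpy as np

PAULI = {
    "X": np.array([[0, 1], [1, 0]], dtype=complex),
    "Y": np.array([[0, -1j], [1j, 0]], dtype=complex),
    "Z": np.array([[1, 0], [0, -1]], dtype=complex),
    "I": np.eye(2, dtype=complex),
}

coeff = 0.5                         # the term 0.5 * X(q0) Z(q1)
factors = [PAULI["X"], PAULI["Z"]]  # num_tensors = 2
dense_term = coeff * np.kron(factors[0], factors[1])  # 4x4 matrix on two qubits
print(dense_term.round(3))
```
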
@@ -308,16 +307,15 @@ def __init__(
         Raises:
             MemoryError: If there is insufficient workspace size on a GPU device.
         """
-        self._handle = libhandle.handle
-        self._dev = libhandle.dev
+        self._lib = libhandle
         self._logger = set_logger("GeneralExpectationValue", loglevel)
 
         self._stream = None
         self._scratch_space = None
         self._work_desc = None
 
         self._expectation = cutn.create_expectation(
-            self._handle, state.state, operator.operator
+            self._lib.handle, state.state, operator.operator
         )
 
     def configure(self, attributes: Optional[dict] = None) -> GeneralExpectationValue:
@@ -345,7 +343,7 @@ def configure(self, attributes: Optional[dict] = None) -> GeneralExpectationValue:
             attr_dtype = cutn.expectation_get_attribute_dtype(attr)
             attr_arr = np.asarray(val, dtype=attr_dtype)
             cutn.expectation_configure(
-                self._handle,
+                self._lib.handle,
                 self._expectation,
                 attr,
                 attr_arr.ctypes.data,
@@ -372,20 +370,20 @@ def prepare(self, scratch_fraction: float = 0.5) -> GeneralExpectationValue:
         self._stream = (
             cp.cuda.Stream()
         )  # In current cuTN release it is unused (could be 0x0)
-        free_mem = self._dev.mem_info[0]
+        free_mem = self._lib.dev.mem_info[0]
         scratch_size = int(scratch_fraction * free_mem)
         self._scratch_space = cp.cuda.alloc(scratch_size)
         self._logger.debug(f"Allocated {scratch_size} bytes of scratch memory on GPU")
-        self._work_desc = cutn.create_workspace_descriptor(self._handle)
+        self._work_desc = cutn.create_workspace_descriptor(self._lib.handle)
         cutn.expectation_prepare(
-            self._handle,
+            self._lib.handle,
             self._expectation,
             scratch_size,
             self._work_desc,
             self._stream.ptr,  # type: ignore
         )
         workspace_size_d = cutn.workspace_get_memory_size(
-            self._handle,
+            self._lib.handle,
             self._work_desc,
             cutn.WorksizePref.RECOMMENDED,
             cutn.Memspace.DEVICE,
@@ -394,7 +392,7 @@ def prepare(self, scratch_fraction: float = 0.5) -> GeneralExpectationValue:
 
         if workspace_size_d <= scratch_size:
             cutn.workspace_set_memory(
-                self._handle,
+                self._lib.handle,
                 self._work_desc,
                 cutn.Memspace.DEVICE,
                 cutn.WorkspaceKind.SCRATCH,
@@ -409,15 +407,15 @@ def prepare(self, scratch_fraction: float = 0.5) -> GeneralExpectationValue:
         else:
             self.destroy()
             raise MemoryError(
-                f"Insufficient workspace size on the GPU device {self._dev.id}"
+                f"Insufficient workspace size on the GPU device {self._lib.dev.id}"
             )
 
     def compute(self) -> tuple[complex, complex]:
         """Computes expectation value."""
         expectation_value = np.empty(1, dtype="complex128")
         state_norm = np.empty(1, dtype="complex128")
         cutn.expectation_compute(
-            self._handle,
+            self._lib.handle,
             self._expectation,
             self._work_desc,
             expectation_value.ctypes.data,
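
`compute` writes two scalars through host pointers: the raw expectation value and the squared norm of the state. If the state is not normalised, the caller would typically divide one by the other; that last step is my reading of the two outputs, not something this diff states:

```python
import numpy as np

# Host buffers exactly as in the diff; cutn.expectation_compute fills them in place.
expectation_value = np.empty(1, dtype="complex128")
state_norm = np.empty(1, dtype="complex128")

# Stand-in values in place of the real library call:
expectation_value[0] = 1.2 + 0.0j
state_norm[0] = 2.0 + 0.0j

normalised = complex(expectation_value[0]) / complex(state_norm[0])
print(normalised)  # <H> for the normalised state, under the reading above
```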