Skip to content

Commit

Permalink
Rename Statetensor to TensorNet (#759)
Browse files Browse the repository at this point in the history
### Before submitting

Please complete the following checklist when submitting a PR:

- [ ] All new features must include a unit test.
If you've fixed a bug or added code that should be tested, add a test to
the
      [`tests`](../tests) directory!

- [ ] All new functions and code must be clearly commented and
documented.
If you do make documentation changes, make sure that the docs build and
      render correctly by running `make docs`.

- [x] Ensure that the test suite passes, by running `make test`.

- [x] Add a new entry to the `.github/CHANGELOG.md` file, summarizing
the
      change, and including a link back to the PR.

- [x] Ensure that code is properly formatted by running `make format`. 

When all the above are checked, delete everything above the dashed
line and fill in the pull request template.


------------------------------------------------------------------------------------------------------------

**Context:**

It will be clearer to rename the type of the `MPS` and `Exact TN` objects passed to
the `ObservablesTNCuda` and `MeasurementsTNCuda` classes, since the object is simply a
tensor-network graph.

**Description of the Change:**

**Benefits:**

**Possible Drawbacks:**

**Related GitHub Issues:**

---------

Co-authored-by: ringo-but-quantum <github-ringo-but-quantum@xanadu.ai>
Co-authored-by: Vincent Michaud-Rioux <vincentm@nanoacademic.com>
Co-authored-by: Amintor Dusko <87949283+AmintorDusko@users.noreply.github.com>
  • Loading branch information
4 people committed Jun 11, 2024
1 parent 547de08 commit f57df46
Show file tree
Hide file tree
Showing 7 changed files with 160 additions and 160 deletions.
3 changes: 3 additions & 0 deletions .github/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,9 @@

### Improvements

* Change the type of tensor network objects passed to `ObservablesTNCuda` and `MeasurementsTNCuda` class from `StateTensorT` to `TensorNetT`.
[(#759)](https://github.com/PennyLaneAI/pennylane-lightning/pull/759)

* Rationalize MCM tests, removing most end-to-end tests from the native MCM test file,
but keeping one that validates multiple mid-circuit measurements with any allowed return.
[(#754)](https://github.com/PennyLaneAI/pennylane-lightning/pull/754)
Expand Down
2 changes: 1 addition & 1 deletion pennylane_lightning/core/_version.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,4 @@
Version number (major.minor.patch[-label])
"""

__version__ = "0.37.0-dev31"
__version__ = "0.37.0-dev32"
Original file line number Diff line number Diff line change
Expand Up @@ -42,21 +42,21 @@ namespace Pennylane::LightningTensor::TNCuda::Measures {
/**
* @brief ObservablesTNCuda's Measurement Class.
*
* This class couples with a state tensor to perform measurements.
* This class couples with a tensor network to perform measurements.
* Observables are defined in the observable class.
*
* @tparam StateTensorT type of the state tensor to be measured.
* @tparam TensorNetT type of the tensor network to be measured.
*/
template <class StateTensorT> class MeasurementsTNCuda {
template <class TensorNetT> class MeasurementsTNCuda {
private:
using PrecisionT = typename StateTensorT::PrecisionT;
using ComplexT = typename StateTensorT::ComplexT;
using PrecisionT = typename TensorNetT::PrecisionT;
using ComplexT = typename TensorNetT::ComplexT;

const StateTensorT &state_tensor_;
const TensorNetT &tensor_network_;

public:
explicit MeasurementsTNCuda(const StateTensorT &state_tensor)
: state_tensor_(state_tensor){};
explicit MeasurementsTNCuda(const TensorNetT &tensor_network)
: tensor_network_(tensor_network){};

/**
* @brief Calculate expectation value for a general Observable.
Expand All @@ -67,24 +67,24 @@ template <class StateTensorT> class MeasurementsTNCuda {
*
* @return Expectation value with respect to the given observable.
*/
auto expval(ObservableTNCuda<StateTensorT> &obs,
auto expval(ObservableTNCuda<TensorNetT> &obs,
const int32_t numHyperSamples = 1) -> PrecisionT {
auto tnoperator =
ObservableTNCudaOperator<StateTensorT>(state_tensor_, obs);
ObservableTNCudaOperator<TensorNetT>(tensor_network_, obs);

ComplexT expectation_val{0.0, 0.0};
ComplexT state_norm2{0.0, 0.0};

cutensornetStateExpectation_t expectation;

PL_CUTENSORNET_IS_SUCCESS(cutensornetCreateExpectation(
/* const cutensornetHandle_t */ state_tensor_.getTNCudaHandle(),
/* cutensornetState_t */ state_tensor_.getQuantumState(),
/* const cutensornetHandle_t */ tensor_network_.getTNCudaHandle(),
/* cutensornetState_t */ tensor_network_.getQuantumState(),
/* cutensornetNetworkOperator_t */ tnoperator.getTNOperator(),
/* cutensornetStateExpectation_t * */ &expectation));

PL_CUTENSORNET_IS_SUCCESS(cutensornetExpectationConfigure(
/* const cutensornetHandle_t */ state_tensor_.getTNCudaHandle(),
/* const cutensornetHandle_t */ tensor_network_.getTNCudaHandle(),
/* cutensornetStateExpectation_t */ expectation,
/* cutensornetExpectationAttributes_t */
CUTENSORNET_EXPECTATION_CONFIG_NUM_HYPER_SAMPLES,
Expand All @@ -93,36 +93,36 @@ template <class StateTensorT> class MeasurementsTNCuda {

cutensornetWorkspaceDescriptor_t workDesc;
PL_CUTENSORNET_IS_SUCCESS(cutensornetCreateWorkspaceDescriptor(
/* const cutensornetHandle_t */ state_tensor_.getTNCudaHandle(),
/* const cutensornetHandle_t */ tensor_network_.getTNCudaHandle(),
/* cutensornetWorkspaceDescriptor_t * */ &workDesc));

const std::size_t scratchSize = cuUtil::getFreeMemorySize() / 2;

// Prepare the specified quantum circuit expectation value for
// computation
PL_CUTENSORNET_IS_SUCCESS(cutensornetExpectationPrepare(
/* const cutensornetHandle_t */ state_tensor_.getTNCudaHandle(),
/* const cutensornetHandle_t */ tensor_network_.getTNCudaHandle(),
/* cutensornetStateExpectation_t */ expectation,
/* size_t maxWorkspaceSizeDevice */ scratchSize,
/* cutensornetWorkspaceDescriptor_t */ workDesc,
/* cudaStream_t [unused] */ 0x0));

std::size_t worksize =
getWorkSpaceMemorySize(state_tensor_.getTNCudaHandle(), workDesc);
getWorkSpaceMemorySize(tensor_network_.getTNCudaHandle(), workDesc);

PL_ABORT_IF(worksize > scratchSize,
"Insufficient workspace size on Device.\n");

const std::size_t d_scratch_length = worksize / sizeof(size_t) + 1;
DataBuffer<size_t, int> d_scratch(d_scratch_length,
state_tensor_.getDevTag(), true);
tensor_network_.getDevTag(), true);

setWorkSpaceMemory(state_tensor_.getTNCudaHandle(), workDesc,
setWorkSpaceMemory(tensor_network_.getTNCudaHandle(), workDesc,
reinterpret_cast<void *>(d_scratch.getData()),
worksize);

PL_CUTENSORNET_IS_SUCCESS(cutensornetExpectationCompute(
/* const cutensornetHandle_t */ state_tensor_.getTNCudaHandle(),
/* const cutensornetHandle_t */ tensor_network_.getTNCudaHandle(),
/* cutensornetStateExpectation_t */ expectation,
/* cutensornetWorkspaceDescriptor_t */ workDesc,
/* void* */ static_cast<void *>(&expectation_val),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,17 +35,17 @@ using namespace Pennylane::LightningTensor::TNCuda::Observables;
/// @endcond

TEMPLATE_TEST_CASE("[Identity]", "[MPSTNCuda_Expval]", float, double) {
using StateTensorT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<StateTensorT>;
using TensorNetT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<TensorNetT>;
auto ONE = TestType(1);

std::size_t bondDim = GENERATE(2, 3, 4, 5);
std::size_t num_qubits = 3;
std::size_t maxBondDim = bondDim;

StateTensorT mps_state{num_qubits, maxBondDim};
TensorNetT mps_state{num_qubits, maxBondDim};

auto measure = MeasurementsTNCuda<StateTensorT>(mps_state);
auto measure = MeasurementsTNCuda<TensorNetT>(mps_state);

SECTION("Using expval") {
mps_state.applyOperations({{"Hadamard"}, {"CNOT"}, {"CNOT"}},
Expand All @@ -60,16 +60,16 @@ TEMPLATE_TEST_CASE("[Identity]", "[MPSTNCuda_Expval]", float, double) {

TEMPLATE_TEST_CASE("[PauliX]", "[MPSTNCuda_Expval]", float, double) {
{
using StateTensorT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<StateTensorT>;
using TensorNetT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<TensorNetT>;

std::size_t bondDim = GENERATE(2, 3, 4, 5);
std::size_t num_qubits = 3;
std::size_t maxBondDim = bondDim;

StateTensorT mps_state{num_qubits, maxBondDim};
TensorNetT mps_state{num_qubits, maxBondDim};

auto measure = MeasurementsTNCuda<StateTensorT>(mps_state);
auto measure = MeasurementsTNCuda<TensorNetT>(mps_state);

auto ZERO = TestType(0);
auto ONE = TestType(1);
Expand Down Expand Up @@ -114,16 +114,16 @@ TEMPLATE_TEST_CASE("[PauliX]", "[MPSTNCuda_Expval]", float, double) {

TEMPLATE_TEST_CASE("[PauliY]", "[MPSTNCuda_Expval]", float, double) {
{
using StateTensorT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<StateTensorT>;
using TensorNetT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<TensorNetT>;

std::size_t bondDim = GENERATE(2, 3, 4, 5);
std::size_t num_qubits = 3;
std::size_t maxBondDim = bondDim;

StateTensorT mps_state{num_qubits, maxBondDim};
TensorNetT mps_state{num_qubits, maxBondDim};

auto measure = MeasurementsTNCuda<StateTensorT>(mps_state);
auto measure = MeasurementsTNCuda<TensorNetT>(mps_state);

auto ZERO = TestType(0);
auto ONE = TestType(1);
Expand Down Expand Up @@ -160,22 +160,22 @@ TEMPLATE_TEST_CASE("[PauliY]", "[MPSTNCuda_Expval]", float, double) {

TEMPLATE_TEST_CASE("[PauliZ]", "[MPSTNCuda_Expval]", float, double) {
{
using StateTensorT = MPSTNCuda<TestType>;
using PrecisionT = StateTensorT::PrecisionT;
using StateTensorT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<StateTensorT>;
using TensorNetT = MPSTNCuda<TestType>;
using PrecisionT = TensorNetT::PrecisionT;
using TensorNetT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<TensorNetT>;

std::size_t bondDim = GENERATE(2, 3, 4, 5);
std::size_t num_qubits = 3;
std::size_t maxBondDim = bondDim;

StateTensorT mps_state{num_qubits, maxBondDim};
TensorNetT mps_state{num_qubits, maxBondDim};

SECTION("Using expval") {
mps_state.applyOperations(
{{"RX"}, {"Hadamard"}, {"Hadamard"}}, {{0}, {1}, {2}},
{{false}, {false}, {false}}, {{0.5}, {}, {}});
auto m = MeasurementsTNCuda<StateTensorT>(mps_state);
auto m = MeasurementsTNCuda<TensorNetT>(mps_state);
auto ob = NamedObsT("PauliZ", {0});
auto res = m.expval(ob);
PrecisionT ref = 0.8775825618903724;
Expand All @@ -186,16 +186,16 @@ TEMPLATE_TEST_CASE("[PauliZ]", "[MPSTNCuda_Expval]", float, double) {

TEMPLATE_TEST_CASE("[Hadamard]", "[MPSTNCuda_Expval]", float, double) {
{
using StateTensorT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<StateTensorT>;
using TensorNetT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<TensorNetT>;

std::size_t bondDim = GENERATE(2, 3, 4, 5);
std::size_t num_qubits = 3;
std::size_t maxBondDim = bondDim;

StateTensorT mps_state{num_qubits, maxBondDim};
TensorNetT mps_state{num_qubits, maxBondDim};

auto measure = MeasurementsTNCuda<StateTensorT>(mps_state);
auto measure = MeasurementsTNCuda<TensorNetT>(mps_state);

auto INVSQRT2 = TestType(0.707106781186547524401);

Expand All @@ -220,16 +220,16 @@ TEMPLATE_TEST_CASE("[Hadamard]", "[MPSTNCuda_Expval]", float, double) {

TEMPLATE_TEST_CASE("[Parametric_obs]", "[MPSTNCuda_Expval]", float, double) {
{
using StateTensorT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<StateTensorT>;
using TensorNetT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<TensorNetT>;

std::size_t bondDim = GENERATE(2, 3, 4, 5);
std::size_t num_qubits = 3;
std::size_t maxBondDim = bondDim;

StateTensorT mps_state{num_qubits, maxBondDim};
TensorNetT mps_state{num_qubits, maxBondDim};

auto measure = MeasurementsTNCuda<StateTensorT>(mps_state);
auto measure = MeasurementsTNCuda<TensorNetT>(mps_state);
auto ONE = TestType(1);

SECTION("Using expval") {
Expand All @@ -245,17 +245,17 @@ TEMPLATE_TEST_CASE("[Parametric_obs]", "[MPSTNCuda_Expval]", float, double) {

TEMPLATE_TEST_CASE("[Hermitian]", "[MPSTNCuda_Expval]", float, double) {
{
using StateTensorT = MPSTNCuda<TestType>;
using ComplexT = typename StateTensorT::ComplexT;
using HermitianObsT = HermitianObsTNCuda<StateTensorT>;
using TensorNetT = MPSTNCuda<TestType>;
using ComplexT = typename TensorNetT::ComplexT;
using HermitianObsT = HermitianObsTNCuda<TensorNetT>;

std::size_t bondDim = GENERATE(2, 3, 4, 5);
std::size_t num_qubits = 3;
std::size_t maxBondDim = bondDim;

StateTensorT mps_state{num_qubits, maxBondDim};
TensorNetT mps_state{num_qubits, maxBondDim};

auto measure = MeasurementsTNCuda<StateTensorT>(mps_state);
auto measure = MeasurementsTNCuda<TensorNetT>(mps_state);

auto ZERO = TestType(0);
auto ONE = TestType(1);
Expand Down Expand Up @@ -303,22 +303,22 @@ TEMPLATE_TEST_CASE("[Hermitian]", "[MPSTNCuda_Expval]", float, double) {

TEMPLATE_TEST_CASE("Test expectation value of TensorProdObs",
"[MPSTNCuda_Expval]", float, double) {
using StateTensorT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<StateTensorT>;
using TensorProdObsT = TensorProdObsTNCuda<StateTensorT>;
using TensorNetT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<TensorNetT>;
using TensorProdObsT = TensorProdObsTNCuda<TensorNetT>;
auto ZERO = TestType(0);
auto INVSQRT2 = TestType(0.707106781186547524401);
SECTION("Using XZ") {
std::size_t bondDim = GENERATE(2);
std::size_t num_qubits = 3;
std::size_t maxBondDim = bondDim;

StateTensorT mps_state{num_qubits, maxBondDim};
TensorNetT mps_state{num_qubits, maxBondDim};

mps_state.applyOperations({{"Hadamard"}, {"Hadamard"}, {"Hadamard"}},
{{0}, {1}, {2}}, {{false}, {false}, {false}});

auto m = MeasurementsTNCuda<StateTensorT>(mps_state);
auto m = MeasurementsTNCuda<TensorNetT>(mps_state);

auto X0 =
std::make_shared<NamedObsT>("PauliX", std::vector<std::size_t>{0});
Expand All @@ -335,12 +335,12 @@ TEMPLATE_TEST_CASE("Test expectation value of TensorProdObs",
std::size_t num_qubits = 3;
std::size_t maxBondDim = bondDim;

StateTensorT mps_state{num_qubits, maxBondDim};
TensorNetT mps_state{num_qubits, maxBondDim};

mps_state.applyOperations({{"Hadamard"}, {"Hadamard"}, {"Hadamard"}},
{{0}, {1}, {2}}, {{false}, {false}, {false}});

auto m = MeasurementsTNCuda<StateTensorT>(mps_state);
auto m = MeasurementsTNCuda<TensorNetT>(mps_state);

auto H0 = std::make_shared<NamedObsT>("Hadamard",
std::vector<std::size_t>{0});
Expand All @@ -357,21 +357,21 @@ TEMPLATE_TEST_CASE("Test expectation value of TensorProdObs",

TEMPLATE_TEST_CASE("Test expectation value of HamiltonianObs",
"[MPSTNCuda_Expval]", float, double) {
using StateTensorT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<StateTensorT>;
using HamiltonianObsT = HamiltonianTNCuda<StateTensorT>;
using TensorNetT = MPSTNCuda<TestType>;
using NamedObsT = NamedObsTNCuda<TensorNetT>;
using HamiltonianObsT = HamiltonianTNCuda<TensorNetT>;
auto ONE = TestType(1);
SECTION("Using XZ") {
std::size_t bondDim = GENERATE(2);
std::size_t num_qubits = 3;
std::size_t maxBondDim = bondDim;

StateTensorT mps_state{num_qubits, maxBondDim};
TensorNetT mps_state{num_qubits, maxBondDim};

mps_state.applyOperations({{"Hadamard"}, {"Hadamard"}, {"Hadamard"}},
{{0}, {1}, {2}}, {{false}, {false}, {false}});

auto m = MeasurementsTNCuda<StateTensorT>(mps_state);
auto m = MeasurementsTNCuda<TensorNetT>(mps_state);

auto X0 =
std::make_shared<NamedObsT>("PauliX", std::vector<std::size_t>{0});
Expand Down
Loading

0 comments on commit f57df46

Please sign in to comment.