
refactored tests
vytautas-a committed Nov 20, 2023
1 parent a49c287 commit fa50c8f
Showing 2 changed files with 31 additions and 24 deletions.
4 changes: 3 additions & 1 deletion qadence/analog/interaction.py
@@ -242,4 +242,6 @@ def _(
     interaction: Callable,
     pattern: Union[AddressingPattern, None],
 ) -> AbstractBlock:
-    return chain(add_interaction(register, b, interaction=interaction) for b in block.blocks)
+    return chain(
+        add_interaction(register, b, interaction=interaction, pattern=pattern) for b in block.blocks
+    )
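
The dropped `pattern` argument meant that for composite blocks (anything built with `chain`) an addressing pattern was silently discarded while the interaction terms were still added. A minimal sketch of the call path this fixes, assuming the API used in the tests below; the `AddressingPattern` keyword names here are an assumption, not the verified constructor:

import torch
from qadence import AnalogRX, AnalogRY, QuantumCircuit, Register, chain
from qadence.analog.addressing import AddressingPattern
from qadence.analog.interaction import add_interaction

n_qubits = 2
reg = Register(support=n_qubits, spacing=8.0)
block = chain(AnalogRX("x"), AnalogRY("y"))  # composite block: hits the patched branch

# hypothetical constructor arguments, mirroring the random weights in the tests
p = AddressingPattern(
    n_qubits=n_qubits,
    det=0.1,
    amp=0.2,
    weights_det={i: w.item() for i, w in enumerate(torch.rand(n_qubits))},
    weights_amp={i: w.item() for i, w in enumerate(torch.rand(n_qubits))},
)

# before the fix, the recursion over block.blocks forgot `pattern`;
# now both sub-blocks receive it
circ = add_interaction(QuantumCircuit(reg, block), pattern=p)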
51 changes: 28 additions & 23 deletions tests/analog/test_patterns.py
@@ -6,19 +6,19 @@
 
 from qadence import (
     AnalogRX,
+    AnalogRY,
     BackendName,
-    DifferentiableBackend,
     DiffMode,
+    Parameter,
     QuantumCircuit,
     QuantumModel,
+    Register,
     chain,
     total_magnetization,
 )
 from qadence.analog.addressing import AddressingPattern
 from qadence.analog.interaction import add_interaction
-from qadence.backends.pulser.backend import Backend as PulserBackend
 from qadence.backends.pulser.config import Configuration
-from qadence.backends.pyqtorch.backend import Backend as PyqBackend
 
 
 @pytest.mark.parametrize(
@@ -31,8 +31,10 @@
 )
 def test_pulser_pyq_addressing(amp: float, det: float, spacing: float) -> None:
     n_qubits = 3
-    block = AnalogRX("x")
-    circ = QuantumCircuit(n_qubits, block)
+    x = Parameter("x")
+    block = chain(AnalogRX(3 * x), AnalogRY(0.5 * x))
+    reg = Register(support=n_qubits, spacing=spacing)
+    circ = QuantumCircuit(reg, block)
 
     # define addressing patterns
     rand_weights_amp = torch.rand(n_qubits)
@@ -51,30 +53,32 @@ def test_pulser_pyq_addressing(amp: float, det: float, spacing: float) -> None:
 
     values = {"x": torch.linspace(0.5, 2 * torch.pi, 50)}
     obs = total_magnetization(n_qubits)
-    conf = Configuration(addressing_pattern=p, spacing=spacing)
+    conf = Configuration(addressing_pattern=p)
 
     # define pulser backend
-    pulser_backend = PulserBackend(config=conf)  # type: ignore[arg-type]
-    conv = pulser_backend.convert(circ, obs)
-    pulser_circ, pulser_obs, embedding_fn, params = conv
-    diff_backend = DifferentiableBackend(pulser_backend, diff_mode=DiffMode.GPSR)
-    expval_pulser = diff_backend.expectation(pulser_circ, pulser_obs, embedding_fn(params, values))
+    model = QuantumModel(
+        circuit=circ,
+        observable=obs,
+        backend=BackendName.PULSER,
+        diff_mode=DiffMode.GPSR,
+        configuration=conf,
+    )
+    expval_pulser = model.expectation(values=values)
 
     # define pyq backend
-    int_circ = add_interaction(circ, spacing=spacing, pattern=p)
-    pyq_backend = PyqBackend()  # type: ignore[arg-type]
-    conv = pyq_backend.convert(int_circ, obs)
-    pyq_circ, pyq_obs, embedding_fn, params = conv
-    diff_backend = DifferentiableBackend(pyq_backend, diff_mode=DiffMode.AD)
-    expval_pyq = diff_backend.expectation(pyq_circ, pyq_obs, embedding_fn(params, values))
+    int_circ = add_interaction(circ, pattern=p)
+    model = QuantumModel(
+        circuit=int_circ, observable=obs, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD
+    )
+    expval_pyq = model.expectation(values=values)
 
     assert torch.allclose(expval_pulser, expval_pyq, atol=ATOL_DICT[BackendName.PULSER])
 
 
 @pytest.mark.flaky(max_runs=10)
 def test_addressing_training() -> None:
     n_qubits = 3
-    spacing = 8
+    reg = Register(support=n_qubits, spacing=8)
     f_value = torch.rand(1)
 
     # define training parameters
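
In the refactored test above, both backends now go through QuantumModel: the Pulser model differentiates via GPSR shift rules, the PyQTorch model via autograd, and only the expectation values are compared. A minimal self-contained sketch of pulling a gradient out of such a model; calling add_interaction without a pattern (allowed, since the signature accepts Union[AddressingPattern, None]) and embedding a requires_grad tensor are assumptions:

import torch
from qadence import (
    AnalogRX, BackendName, DiffMode, Parameter, QuantumCircuit,
    QuantumModel, Register, total_magnetization,
)
from qadence.analog.interaction import add_interaction

x = Parameter("x")
reg = Register(support=2, spacing=8.0)
# the pyqtorch backend needs the interaction terms added explicitly
circ = add_interaction(QuantumCircuit(reg, AnalogRX(3 * x)))
model = QuantumModel(
    circuit=circ,
    observable=total_magnetization(2),
    backend=BackendName.PYQTORCH,
    diff_mode=DiffMode.AD,
)
x_vals = torch.linspace(0.5, 2 * torch.pi, 10, requires_grad=True)
expval = model.expectation(values={"x": x_vals})
grad = torch.autograd.grad(expval.sum(), x_vals)[0]  # d<M>/dx per batch point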
@@ -91,17 +95,18 @@ def test_addressing_training() -> None:
     )
 
     # define training circuit
-    circ = QuantumCircuit(n_qubits, AnalogRX(1 + torch.rand(1).item()))
-    circ = add_interaction(circ, spacing=spacing, pattern=p)
+    block = chain(AnalogRX(1 + torch.rand(1).item()), AnalogRY(1 + torch.rand(1).item()))
+    circ = QuantumCircuit(reg, block)
+    circ = add_interaction(circ, pattern=p)
 
     # define quantum model
     obs = total_magnetization(n_qubits)
-    model = QuantumModel(circuit=circ, observable=obs)
+    model = QuantumModel(circuit=circ, observable=obs, backend=BackendName.PYQTORCH)
 
     # prepare for training
-    optimizer = torch.optim.Adam(model.parameters(), lr=0.25)
+    optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
     loss_criterion = torch.nn.MSELoss()
-    n_epochs = 100
+    n_epochs = 200
     loss_save = []
 
     # train model
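The loop body itself is collapsed in this view (the diff ends at the "# train model" comment). A plausible reconstruction given the Adam optimizer, MSE criterion, random f_value target, and loss_save list defined above; the actual loop in the commit may differ:

for _ in range(n_epochs):
    optimizer.zero_grad()
    # the trainable pattern weights live inside the model; passing an empty
    # values dict is an assumption about how the test evaluates it
    out = model.expectation(values={}).squeeze()
    loss = loss_criterion(out, f_value.squeeze())
    loss.backward()
    optimizer.step()
    loss_save.append(loss.item())

# hypothetical sanity check; @pytest.mark.flaky(max_runs=10) retries on failure
assert loss_save[-1] < loss_save[0]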
