Merge branch 'main' into ve/refresh_fms
jpmoutinho committed Oct 16, 2023
2 parents ba1b4a9 + 1efd5d9 commit 6e04ca4
Showing 6 changed files with 269 additions and 244 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/test_fast.yml
@@ -50,7 +50,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
    - name: Check out Qadence
-     uses: actions/checkout@v3
+     uses: actions/checkout@v4
      with:
        ref: ${{ github.ref }}
    - name: Set up Python
67 changes: 36 additions & 31 deletions docs/digital_analog_qc/analog-basics.md
@@ -16,7 +16,6 @@ where $\Omega$ is the Rabi frequency, $\delta$ is the detuning, and $\hat n = \frac{1-\sigma^z}{2}$ is the number operator. Two analog blocks build on this Hamiltonian:

- [`WaitBlock`][qadence.blocks.analog.WaitBlock], which freely evolves $\mathcal{H}_{\textrm{int}}$
- [`ConstantAnalogRotation`][qadence.blocks.analog.ConstantAnalogRotation], which evolves the full $\mathcal{H}$


The `wait` operation can be emulated with a $ZZ$- (Ising) or an $XY$-interaction. The corresponding snippet is collapsed in this diff view; a rough sketch follows the hunk marker below:

@@ -97,11 +96,8 @@ print(f"Analog Kron block = {analog_kron}") # markdown-exec: hide
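Since the original snippet is collapsed above, here is a rough editorial sketch of the idea, reusing only calls that appear elsewhere on this page (`Register.from_coordinates`, `wait`, `add_interaction`, the last assumed to be exported at the top level and to default to the Ising interaction; the geometry and duration are placeholder values). For context, the standard Rydberg interaction terms are $\mathcal{H}^{ZZ}_{\textrm{int}} = \sum_{i<j} \frac{C_6}{r_{ij}^6} \hat n_i \hat n_j$ and $\mathcal{H}^{XY}_{\textrm{int}} = \sum_{i<j} \frac{C_3}{r_{ij}^3} \left(\sigma_i^+ \sigma_j^- + \sigma_i^- \sigma_j^+\right)$; these are stated here for orientation, not taken from the diff.

```python
from qadence import Register, add_interaction, wait

# Two qubits, 8 μm apart (placeholder geometry for this sketch).
reg = Register.from_coordinates([(0, 0), (0, 8)])

# Free evolution under the interaction Hamiltonian only.
block = wait(duration=1000)

# Emulate the analog block by adding the interaction term explicitly;
# the Ising (ZZ) form is assumed to be the default here.
emulated = add_interaction(reg, block)
print(emulated)
```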

## Fitting a simple function

-Analog blocks can indeed be parametrized to, for instance, create small ansatze to fit a sine function. When using the `pyqtorch` backend the `add_interaction` function is called automatically. As usual, we can choose which differentiation backend we want to use: autodiff or parameter shift rule (PSR).
+Analog blocks can be parametrized in the usual Qadence manner and, like any other parameters, they can be optimized. The next snippet exemplifies the creation of a parametrized analog ansatz to fit a sine function; note that `t` is a `FeatureParameter` (a model input) while `theta` is a trainable `VariationalParameter`. First, define an ansatz block and an observable:

-First we define an ansatz block and an observable
```python exec="on" source="material-block" session="sin"
import torch
from qadence import Register, FeatureParameter, VariationalParameter
@@ -110,19 +106,20 @@ from qadence import wait, chain, add

pi = torch.pi

-# two qubit register
+# A two qubit register.
reg = Register.from_coordinates([(0, 0), (0, 12)])

-# analog ansatz with input parameter
+# An analog ansatz with an input time parameter.
t = FeatureParameter("t")

block = chain(
-    AnalogRX(pi / 2),
+    AnalogRX(pi/2.),
    AnalogRZ(t),
    wait(1000 * VariationalParameter("theta", value=0.5)),
-    AnalogRX(pi / 2),
+    AnalogRX(pi/2),
)

-# observable
+# Total magnetization observable.
obs = add(Z(i) for i in range(reg.n_qubits))
```
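For reference, the comment above written out: the observable measured throughout this example is the total magnetization of the register (an editorial aside, not from the diff):

$$\mathcal{O} = \sum_{i=0}^{N-1} Z_i$$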

@@ -139,58 +136,66 @@ obs = add(Z(i) for i in range(reg.n_qubits))
    ax.scatter(xnp, ynp, **kwargs)
```

-Then we define the dataset we want to train on and plot the initial prediction.
+Next, define the dataset to train on and plot the initial prediction. The differentiation mode can be set to either `DiffMode.AD` or `DiffMode.GPSR`.

```python exec="on" source="material-block" html="1" result="json" session="sin"
import matplotlib.pyplot as plt
-from qadence import QuantumCircuit, QuantumModel
+from qadence import QuantumCircuit, QuantumModel, DiffMode

-# define quantum model; including digital-analog emulation
+# Define a quantum model including digital-analog emulation.
circ = QuantumCircuit(reg, block)
-model = QuantumModel(circ, obs, diff_mode="gpsr")
+model = QuantumModel(circ, obs, diff_mode=DiffMode.GPSR)

# Time support dataset.
x_train = torch.linspace(0, 6, steps=30)
# Function to fit.
y_train = -0.64 * torch.sin(x_train + 0.33) + 0.1
# Initial prediction.
y_pred_initial = model.expectation({"t": x_train})

-fig, ax = plt.subplots()
-scatter(ax, x_train, y_train, label="Training points", marker="o", color="green")
-plot(ax, x_train, y_pred_initial, label="Initial prediction")
-plt.legend()
+fig, ax = plt.subplots() # markdown-exec: hide
+plt.xlabel("Time [μs]") # markdown-exec: hide
+plt.ylabel("Sin [arb.]") # markdown-exec: hide
+scatter(ax, x_train, y_train, label="Training points", marker="o", color="green") # markdown-exec: hide
+plot(ax, x_train, y_pred_initial, label="Initial prediction") # markdown-exec: hide
+plt.legend() # markdown-exec: hide
from docs import docsutils # markdown-exec: hide
print(docsutils.fig_to_html(fig)) # markdown-exec: hide
```
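As a quick aside (an editorial sketch, not part of this commit): the paragraph above mentions both differentiation modes, and, assuming the constructor accepts either enum value, the autodiff variant differs only in the `diff_mode` argument:

```python
from qadence import QuantumCircuit, QuantumModel, DiffMode

# Same circuit and observable as above; only the differentiation mode changes.
model_ad = QuantumModel(circ, obs, diff_mode=DiffMode.AD)
```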

-The rest is the usual PyTorch training routine.
+Finally, the classical optimization part is handled by PyTorch:

```python exec="on" source="material-block" html="1" result="json" session="sin"

# Use PyTorch built-in functionality.
mse_loss = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=5e-2)


# Define a loss function.
def loss_fn(x_train, y_train):
    return mse_loss(model.expectation({"t": x_train}).squeeze(), y_train)


-# train
+# Number of epochs to train over.
n_epochs = 200

# Optimization loop.
for i in range(n_epochs):
    optimizer.zero_grad()

    loss = loss_fn(x_train, y_train)
    loss.backward()
    optimizer.step()

    # if (i + 1) % 10 == 0:
    #     print(f"Epoch {i+1:0>3} - Loss: {loss.item()}\n")

-# visualize
+# Get and visualize the final prediction.
y_pred = model.expectation({"t": x_train})

-fig, ax = plt.subplots()
-scatter(ax, x_train, y_train, label="Training points", marker="o", color="green")
-plot(ax, x_train, y_pred_initial, label="Initial prediction")
-plot(ax, x_train, y_pred, label="Final prediction")
-plt.legend()
+fig, ax = plt.subplots() # markdown-exec: hide
+plt.xlabel("Time [μs]") # markdown-exec: hide
+plt.ylabel("Sin [arb.]") # markdown-exec: hide
+scatter(ax, x_train, y_train, label="Training points", marker="o", color="green") # markdown-exec: hide
+plot(ax, x_train, y_pred_initial, label="Initial prediction") # markdown-exec: hide
+plot(ax, x_train, y_pred, label="Final prediction") # markdown-exec: hide
+plt.legend() # markdown-exec: hide
from docs import docsutils # markdown-exec: hide
print(docsutils.fig_to_html(fig)) # markdown-exec: hide
assert loss_fn(x_train, y_train) < 0.05 # markdown-exec: hide
```
128 changes: 68 additions & 60 deletions docs/digital_analog_qc/analog-qubo.md
@@ -1,10 +1,11 @@
In this notebook we solve a quadratic unconstrained optimization problem with
-`qadence` emulated analog interface using the QAOA variational algorithm. The
+the Qadence emulated analog interface using the QAOA variational algorithm. The
problem is detailed in the Pulser documentation
[here](https://pulser.readthedocs.io/en/stable/tutorials/qubo.html).
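
As an orienting aside (illustrative, not from this diff): a QUBO instance assigns each bitstring $z \in \{0, 1\}^n$ the cost $z^T Q z$, and the task is to find the bitstring minimizing it. A hypothetical two-variable matrix `Q_toy` makes the bookkeeping concrete:

```python
import numpy as np

# Hypothetical toy QUBO over two binary variables.
Q_toy = np.array([[-1.0, 2.0],
                  [2.0, -1.0]])

for bits in ["00", "01", "10", "11"]:
    z = np.array(list(bits), dtype=int)
    print(bits, z.T @ Q_toy @ z)

# "01" and "10" each cost -1.0 while "11" costs 2.0: the off-diagonal
# penalty discourages setting both bits, so the minima are "01" and "10".
```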

+## Define and solve QUBO

-??? note "Construct QUBO register (defines `qubo_register_coords` function)"
+??? note "Pre-requisite: construct QUBO register"
    Before we start we have to define a register that fits into our device.
    ```python exec="on" source="material-block" session="qubo"
    import torch

@@ -55,7 +56,6 @@ problem is detailed in the Pulser documentation
    ```


-## Define and solve QUBO

```python exec="on" source="material-block" session="qubo"
import matplotlib.pyplot as plt
@@ -70,20 +70,21 @@ np.random.seed(seed)
torch.manual_seed(seed)
```

-The QUBO is defined by weighted connections `Q` and a cost function.
+The QUBO problem is initially defined by a graph of weighted connections `Q` and a cost function.

```python exec="on" source="material-block" session="qubo"
def cost_colouring(bitstring, Q):
    z = np.array(list(bitstring), dtype=int)
    cost = z.T @ Q @ z
    return cost


# Cost function.
def cost_fn(counter, Q):
    cost = sum(counter[key] * cost_colouring(key, Q) for key in counter)
    return cost / sum(counter.values())  # Divide by total samples.


# Weights.
Q = np.array(
    [
        [-10.0, 19.7365809, 19.7365809, 5.42015853, 5.42015853],
@@ -95,36 +96,42 @@ Q = np.array(
)
```

-Build a register from graph extracted from the QUBO exactly
-as you would do with Pulser.
+Now, build a weighted register graph from the QUBO definition similarly to what is
+done in Pulser.

```python exec="on" source="material-block" session="qubo"
reg = Register.from_coordinates(qubo_register_coords(Q))
```

The analog circuit is composed of two global rotations per layer. The first
rotation corresponds to the mixing Hamiltonian and the second one to the
-embedding Hamiltonian. Subsequently we add the Ising interaction term to
-emulate the analog circuit. This uses a principal quantum number n=70 for the
-Rydberg level under the hood.
+embedding Hamiltonian in the QAOA algorithm. Subsequently, an Ising interaction term is added to
+emulate the analog circuit; the Rydberg level is set to $n = 70$ under the hood.

```python exec="on" source="material-block" result="json" session="qubo"
from qadence.transpile.emulate import ising_interaction

-LAYERS = 2
-block = chain(*[AnalogRX(f"t{i}") * AnalogRZ(f"s{i}") for i in range(LAYERS)])
+layers = 2
+block = chain(*[AnalogRX(f"t{i}") * AnalogRZ(f"s{i}") for i in range(layers)])

emulated = add_interaction(
    reg, block, interaction=lambda r, ps: ising_interaction(r, ps, rydberg_level=70)
)
-print(emulated)
+print(f"emulated = \n") # markdown-exec: hide
+print(emulated) # markdown-exec: hide
```

-Sample the model to get the initial solution.
+Next, an initial solution is computed by sampling the model:

-```python exec="on" source="material-block" session="qubo"
+```python exec="on" source="material-block" result="json" session="qubo"
model = QuantumModel(QuantumCircuit(reg, emulated), backend="pyqtorch", diff_mode='gpsr')
initial_counts = model.sample({}, n_shots=1000)[0]

print(f"initial_counts = {initial_counts}") # markdown-exec: hide
```

The loss function is defined by averaging over the evaluated bitstrings.
Then, the loss function is defined by averaging over the evaluated bitstrings.

```python exec="on" source="material-block" session="qubo"
def loss(param, *args):
    Q = args[0]
@@ -133,63 +140,64 @@ def loss(param, *args):
    C = model.sample({}, n_shots=1000)[0]
    return cost_fn(C, Q)
```
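To make the optimizer's calling convention concrete, here is a small illustrative check (an editorial sketch reusing `loss`, `Q`, and `layers` from this page; the flat layout `[t0, s0, t1, s1]` of the parameter array is an assumption matching the `x0` initialization below):

```python
import numpy as np

# A random flat parameter array, two entries per layer
# (assumed order [t0, s0, t1, s1] for layers = 2).
test_param = np.random.uniform(1, 10, size=2 * layers)

# `loss` writes the array into the model's variational parameters,
# samples 1000 bitstrings, and returns the average cost.
print(loss(test_param, Q))
```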
-Here we use a gradient-free optimization loop for reaching the optimal solution.
+A gradient-free optimization loop is then used to compute the optimal solution.

```python exec="on" source="material-block" result="json" session="qubo"
-#
-for i in range(20):
-    try:
-        res = minimize(
-            loss,
-            args=Q,
-            x0=np.random.uniform(1, 10, size=2 * LAYERS),
-            method="COBYLA",
-            tol=1e-8,
-            options={"maxiter": 20},
-        )
-    except Exception:
-        pass
-
-# sample the optimal solution
+# Optimization loop.
+res = minimize(
+    loss,
+    args=Q,
+    x0=np.random.uniform(1, 10, size=2 * layers),
+    method="COBYLA",
+    tol=1e-8,
+    options={"maxiter": 20},
+)
+
+# Sample and visualize the optimal solution.
model.reset_vparams(res.x)
-optimal_count_dict = model.sample({}, n_shots=1000)[0]
-print(optimal_count_dict)
+optimal_count = model.sample({}, n_shots=1000)[0]
+print(f"optimal_count = {optimal_count}") # markdown-exec: hide
```

Finally, plot the solution:

```python exec="on" source="material-block" html="1" session="qubo"
-fig, axs = plt.subplots(1, 2, figsize=(12, 4))
+fig, axs = plt.subplots(1, 2, figsize=(12, 4)) # markdown-exec: hide

-# known solutions to the QUBO
+# Known solutions to the QUBO problem.
solution_bitstrings=["01011", "00111"]

-n_to_show = 20
-xs, ys = zip(*sorted(
-    initial_counts.items(),
-    key=lambda item: item[1],
-    reverse=True
-))
-colors = ["r" if x in solution_bitstrings else "g" for x in xs]
-
-axs[0].set_xlabel("bitstrings")
-axs[0].set_ylabel("counts")
-axs[0].bar(xs[:n_to_show], ys[:n_to_show], width=0.5, color=colors)
-axs[0].tick_params(axis="x", labelrotation=90)
-axs[0].set_title("Initial solution")
-
-xs, ys = zip(*sorted(optimal_count_dict.items(),
-    key=lambda item: item[1],
-    reverse=True
-))
+n_to_show = 20 # markdown-exec: hide
+xs, ys = zip(*sorted( # markdown-exec: hide
+    initial_counts.items(), # markdown-exec: hide
+    key=lambda item: item[1], # markdown-exec: hide
+    reverse=True # markdown-exec: hide
+)) # markdown-exec: hide
+colors = ["r" if x in solution_bitstrings else "g" for x in xs] # markdown-exec: hide
+
+axs[0].set_xlabel("bitstrings") # markdown-exec: hide
+axs[0].set_ylabel("counts") # markdown-exec: hide
+axs[0].bar(xs[:n_to_show], ys[:n_to_show], width=0.5, color=colors) # markdown-exec: hide
+axs[0].tick_params(axis="x", labelrotation=90) # markdown-exec: hide
+axs[0].set_title("Initial solution") # markdown-exec: hide
+
+xs, ys = zip(*sorted(optimal_count.items(), # markdown-exec: hide
+    key=lambda item: item[1], # markdown-exec: hide
+    reverse=True # markdown-exec: hide
+)) # markdown-exec: hide
+# xs = list(xs) # markdown-exec: hide
+# assert (xs[0] == "01011" and xs[1] == "00111") or (xs[1] == "01011" and xs[0] == "00111"), print(f"{xs=}") # markdown-exec: hide

-colors = ["r" if x in solution_bitstrings else "g" for x in xs]
+colors = ["r" if x in solution_bitstrings else "g" for x in xs] # markdown-exec: hide

-axs[1].set_xlabel("bitstrings")
-axs[1].set_ylabel("counts")
-axs[1].bar(xs[:n_to_show], ys[:n_to_show], width=0.5, color=colors)
-axs[1].tick_params(axis="x", labelrotation=90)
-axs[1].set_title("Optimal solution")
-plt.tight_layout()
+axs[1].set_xlabel("bitstrings") # markdown-exec: hide
+axs[1].set_ylabel("counts") # markdown-exec: hide
+axs[1].bar(xs[:n_to_show], ys[:n_to_show], width=0.5, color=colors) # markdown-exec: hide
+axs[1].tick_params(axis="x", labelrotation=90) # markdown-exec: hide
+axs[1].set_title("Optimal solution") # markdown-exec: hide
+plt.tight_layout() # markdown-exec: hide
from docs import docsutils # markdown-exec: hide
print(docsutils.fig_to_html(fig)) # markdown-exec: hide
```