Skip to content

Commit f2b5ea4

Browse files
committed
update the refactor of examples! huge refactor!
1 parent f4cbbea commit f2b5ea4

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

67 files changed

+1401
-1309
lines changed

examples/.gitignore

Lines changed: 0 additions & 1 deletion
This file was deleted.

examples/apicomparison/README.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
1-
API comparison on the same standard variational circuit evaluation task demonstrating the advantage of TensorCircuit API design.
1+
API comparison on the same standard variational circuit evaluation task demonstrating the advantage of TyxonQ API design.
22

3-
* QML subtask refers to building a keras model of quantum circuit.
3+
* QML subtask refers to building a pytorch model of quantum circuit.
44

55
* VQE subtask refers to getting energy and circuit gradients.
66

7-
| # Lines (# Packages) | TensorFlow Quantum | Pennylane | TensorCircuit |
7+
| # Lines (# Packages) | TensorFlow Quantum | Pennylane | TyxonQ |
88
| :------------------: | :----------------: | :-------: | :-----------: |
99
| QML subtask | 32 (5) | 18 (2) | 16 (1) |
1010
| VQE subtask | 47 (5) | 29 (2) | 20 (1) |

examples/circuit_compiler.py

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
"""
2-
compilation utilities in tensorcircuit
2+
compilation utilities in tyxonq
33
"""
44

5-
import tensorcircuit as tc
5+
import tyxonq as tq
66

77

8-
c = tc.Circuit(3)
8+
c = tq.Circuit(3)
99
c.rx(0, theta=0.2)
1010
c.rz(0, theta=-0.3)
1111
c.ry(1, theta=0.1)
@@ -17,24 +17,24 @@
1717
c.rxx(1, 2, theta=1.7)
1818

1919

20-
c0, _ = tc.compiler.qiskit_compiler.qiskit_compile(
20+
c0, _ = tq.compiler.qiskit_compiler.qiskit_compile(
2121
c,
2222
compiled_options={"optimization_level": 0, "basis_gates": ["cx", "cz", "h", "rz"]},
2323
)
2424

25-
c1, _ = tc.compiler.qiskit_compiler.qiskit_compile(
25+
c1, _ = tq.compiler.qiskit_compiler.qiskit_compile(
2626
c,
2727
compiled_options={"optimization_level": 1, "basis_gates": ["cx", "cz", "h", "rz"]},
2828
)
2929

3030

31-
c2, _ = tc.compiler.qiskit_compiler.qiskit_compile(
31+
c2, _ = tq.compiler.qiskit_compiler.qiskit_compile(
3232
c,
3333
compiled_options={"optimization_level": 2, "basis_gates": ["cx", "cz", "h", "rz"]},
3434
)
3535

3636

37-
c3, _ = tc.compiler.qiskit_compiler.qiskit_compile(
37+
c3, _ = tq.compiler.qiskit_compiler.qiskit_compile(
3838
c,
3939
compiled_options={"optimization_level": 3, "basis_gates": ["cx", "cz", "h", "rz"]},
4040
)
@@ -52,14 +52,14 @@
5252
print(c3.draw())
5353

5454

55-
compiler_wo_mapping = tc.compiler.DefaultCompiler()
55+
compiler_wo_mapping = tq.compiler.DefaultCompiler()
5656
c4, _ = compiler_wo_mapping(c)
5757
print(
58-
"compiled with tc default compiler: combining the good from qiskit and our tc own"
58+
"compiled with tq default compiler: combining the good from qiskit and our tq own"
5959
)
6060
# we always suggest using DefaultCompiler for tasks on qcloud
6161
# internally we run optimized compiling using U3 basis with qiskit which has good performance
62-
# and we unroll u3 with rz and apply replace/prune/merge loop developed in tc to further optimize the circuit
62+
# and we unroll u3 with rz and apply replace/prune/merge loop developed in tq to further optimize the circuit
6363
print(c4.draw())
6464

6565
print("gate number comparison (last ours vs before qiskit (0, 1, 2, 3))")
@@ -68,7 +68,7 @@
6868

6969
# if we want to apply routing/qubit mapping
7070

71-
compiler_w_mapping = tc.compiler.DefaultCompiler(
71+
compiler_w_mapping = tq.compiler.DefaultCompiler(
7272
{"coupling_map": [[0, 2], [2, 0], [1, 0], [0, 1]]}
7373
)
7474
c5, info = compiler_w_mapping(c)

examples/clifford_optimization.py

Lines changed: 44 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -3,19 +3,19 @@
33
"""
44

55
import numpy as np
6-
import tensorflow as tf
6+
import torch
77

8-
import tensorcircuit as tc
8+
import tyxonq as tq
99

10-
ctype, rtype = tc.set_dtype("complex64")
11-
K = tc.set_backend("tensorflow")
10+
ctype, rtype = tq.set_dtype("complex64")
11+
K = tq.set_backend("pytorch")
1212

1313
n = 6
1414
nlayers = 6
1515

1616

1717
def ansatz(structureo, structuret, preprocess="direct"):
18-
c = tc.Circuit(n)
18+
c = tq.Circuit(n)
1919
if preprocess == "softmax":
2020
structureo = K.softmax(structureo, axis=-1)
2121
structuret = K.softmax(structuret, axis=-1)
@@ -28,30 +28,30 @@ def ansatz(structureo, structuret, preprocess="direct"):
2828
structureo = K.cast(structureo, ctype)
2929
structuret = K.cast(structuret, ctype)
3030

31-
structureo = tf.reshape(structureo, shape=[nlayers, n, 7])
32-
structuret = tf.reshape(structuret, shape=[nlayers, n, 3])
31+
structureo = torch.reshape(structureo, shape=[nlayers, n, 7])
32+
structuret = torch.reshape(structuret, shape=[nlayers, n, 3])
3333

3434
for i in range(n):
35-
c.H(i)
35+
c.h(i)
3636
for j in range(nlayers):
3737
for i in range(n):
3838
c.unitary(
3939
i,
40-
unitary=structureo[j, i, 0] * tc.gates.i().tensor
41-
+ structureo[j, i, 1] * tc.gates.x().tensor
42-
+ structureo[j, i, 2] * tc.gates.y().tensor
43-
+ structureo[j, i, 3] * tc.gates.z().tensor
44-
+ structureo[j, i, 4] * tc.gates.h().tensor
45-
+ structureo[j, i, 5] * tc.gates.s().tensor
46-
+ structureo[j, i, 6] * tc.gates.sd().tensor,
40+
unitary=structureo[j, i, 0] * tq.gates.i().tensor
41+
+ structureo[j, i, 1] * tq.gates.x().tensor
42+
+ structureo[j, i, 2] * tq.gates.y().tensor
43+
+ structureo[j, i, 3] * tq.gates.z().tensor
44+
+ structureo[j, i, 4] * tq.gates.h().tensor
45+
+ structureo[j, i, 5] * tq.gates.s().tensor
46+
+ structureo[j, i, 6] * tq.gates.sd().tensor,
4747
)
4848
for i in range(n - 1):
4949
c.unitary(
5050
i,
5151
i + 1,
52-
unitary=structuret[j, i, 0] * tc.gates.ii().tensor
53-
+ structuret[j, i, 1] * tc.gates.cnot().tensor
54-
+ structuret[j, i, 2] * tc.gates.cz().tensor,
52+
unitary=structuret[j, i, 0] * tq.gates.ii().tensor
53+
+ structuret[j, i, 1] * tq.gates.cnot().tensor
54+
+ structuret[j, i, 2] * tq.gates.cz().tensor,
5555
)
5656
# loss = K.real(
5757
# sum(
@@ -60,7 +60,7 @@ def ansatz(structureo, structuret, preprocess="direct"):
6060
# )
6161
# )
6262
s = c.state()
63-
loss = -K.real(tc.quantum.entropy(tc.quantum.reduced_density_matrix(s, cut=n // 2)))
63+
loss = -K.real(tq.quantum.entropy(tq.quantum.reduced_density_matrix(s, cut=n // 2)))
6464
return loss
6565

6666

@@ -77,6 +77,7 @@ def sampling_from_structure(structures, batch=1):
7777
return r.transpose()
7878

7979

80+
# warning pytorch might be unable to do this exactly
8081
@K.jit
8182
def best_from_structure(structures):
8283
return K.argmax(structures, axis=-1)
@@ -89,31 +90,30 @@ def nmf_gradient(structures, oh):
8990
choice = K.argmax(oh, axis=-1)
9091
prob = K.softmax(K.real(structures), axis=-1)
9192
indices = K.transpose(
92-
K.stack([K.cast(tf.range(structures.shape[0]), "int64"), choice])
93+
K.stack([K.cast(torch.arange(structures.shape[0]), "int64"), choice])
9394
)
94-
prob = tf.gather_nd(prob, indices)
95+
prob = torch.gather(prob, 0, indices.unsqueeze(0)).squeeze(0)
9596
prob = K.reshape(prob, [-1, 1])
9697
prob = K.tile(prob, [1, structures.shape[-1]])
9798

98-
return K.real(
99-
tf.tensor_scatter_nd_add(
100-
tf.cast(-prob, dtype=ctype),
101-
indices,
102-
tf.ones([structures.shape[0]], dtype=ctype),
103-
)
104-
)
99+
# warning pytorch might be unable to do this exactly
100+
result = torch.zeros_like(structures, dtype=ctype)
101+
result.scatter_add_(0, indices, torch.ones([structures.shape[0]], dtype=ctype))
102+
return K.real(result - prob)
105103

106104

105+
# warning pytorch might be unable to do this exactly
107106
nmf_gradient_vmap = K.jit(K.vmap(nmf_gradient, vectorized_argnums=1))
107+
# warning pytorch might be unable to do this exactly
108108
vf = K.jit(K.vmap(ansatz, vectorized_argnums=(0, 1)), static_argnums=2)
109109

110110

111111
def main(stddev=0.05, lr=None, epochs=2000, debug_step=50, batch=256, verbose=False):
112112
so = K.implicit_randn([nlayers * n, 7], stddev=stddev)
113113
st = K.implicit_randn([nlayers * n, 3], stddev=stddev)
114114
if lr is None:
115-
lr = tf.keras.optimizers.schedules.ExponentialDecay(0.06, 1000, 0.5)
116-
structure_opt = tc.backend.optimizer(tf.keras.optimizers.Adam(lr))
115+
lr = 0.06 # Simplified learning rate
116+
structure_opt = torch.optim.Adam([so, st], lr=lr)
117117

118118
avcost = 0
119119
avcost2 = 0
@@ -135,23 +135,28 @@ def main(stddev=0.05, lr=None, epochs=2000, debug_step=50, batch=256, verbose=Fa
135135

136136
# go = [(vs[i] - avcost2) * go[i] for i in range(batch)]
137137
# gt = [(vs[i] - avcost2) * gt[i] for i in range(batch)]
138-
# go = tf.math.reduce_mean(go, axis=0)
139-
# gt = tf.math.reduce_mean(gt, axis=0)
138+
# go = torch.math.reduce_mean(go, axis=0)
139+
# gt = torch.math.reduce_mean(gt, axis=0)
140140
avcost2 = avcost
141141

142-
[so, st] = structure_opt.update([go, gt], [so, st])
142+
# Update parameters using PyTorch optimizer
143+
structure_opt.zero_grad()
144+
so.grad = go
145+
st.grad = gt
146+
structure_opt.step()
147+
143148
# so -= K.reshape(K.mean(so, axis=-1), [-1, 1])
144149
# st -= K.reshape(K.mean(st, axis=-1), [-1, 1])
145150
if epoch % debug_step == 0 or epoch == epochs - 1:
146151
print("----------epoch %s-----------" % epoch)
147152
print(
148153
"batched average loss: ",
149-
np.mean(vs),
154+
np.mean(vs.detach().cpu().numpy()),
150155
"minimum candidate loss: ",
151-
np.min(vs),
156+
np.min(vs.detach().cpu().numpy()),
152157
)
153-
minp1 = tf.math.reduce_min(tf.math.reduce_max(tf.math.softmax(st), axis=-1))
154-
minp2 = tf.math.reduce_min(tf.math.reduce_max(tf.math.softmax(so), axis=-1))
158+
minp1 = torch.min(torch.max(torch.softmax(st, dim=-1), dim=-1)[0])
159+
minp2 = torch.min(torch.max(torch.softmax(so, dim=-1), dim=-1)[0])
155160
if minp1 > 0.3 and minp2 > 0.6:
156161
print("probability converged")
157162
break
@@ -161,9 +166,9 @@ def main(stddev=0.05, lr=None, epochs=2000, debug_step=50, batch=256, verbose=Fa
161166
print(st)
162167
print(
163168
"structure parameter: \n",
164-
so.numpy(),
169+
so.detach().cpu().numpy(),
165170
"\n",
166-
st.numpy(),
171+
st.detach().cpu().numpy(),
167172
)
168173

169174
cand_preseto = best_from_structure(so)

examples/cotengra_setting_bench.py

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
import numpy as np
1212

1313
sys.path.insert(0, "../")
14-
import tensorcircuit as tc
14+
import tyxonq as tq
1515

1616
try:
1717
import kahypar
@@ -28,16 +28,16 @@
2828
"ordered indices corresponding to array axes.",
2929
)
3030

31-
K = tc.set_backend("jax")
31+
K = tq.set_backend("pytorch")
3232

3333

3434
def generate_circuit(param, g, n, nlayers):
3535
# construct the circuit ansatz
36-
c = tc.Circuit(n)
36+
c = tq.Circuit(n)
3737
for i in range(n):
38-
c.H(i)
38+
c.h(i)
3939
for j in range(nlayers):
40-
c = tc.templates.blocks.QAOA_block(c, g, param[j, 0], param[j, 1])
40+
c = tq.templates.blocks.QAOA_block(c, g, param[j, 0], param[j, 1])
4141
return c
4242

4343

@@ -55,6 +55,7 @@ def loss_f(params, n, nlayers):
5555
params = K.implicit_randn(shape=[nlayers, 2])
5656

5757
# run only once to trigger the compilation
58+
# warning pytorch might be unable to do this exactly
5859
K.jit(
5960
loss_f,
6061
static_argnums=(1, 2),
@@ -151,7 +152,7 @@ def get_optimizer(method, optlib, post_processing, minimize):
151152
f"graph: {graph}, method: {method}, optlib: {optlib}, "
152153
f"post_processing: {post_processing}, minimize: {minimize}"
153154
)
154-
tc.set_contractor(
155+
tq.set_contractor(
155156
"custom",
156157
optimizer=get_optimizer(method, optlib, post_processing, minimize),
157158
contraction_info=True,

examples/ghz_dqas.py

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -6,23 +6,23 @@
66

77
sys.path.insert(0, "../")
88
import numpy as np
9-
import tensorflow as tf
9+
import torch
1010
import cirq
1111

12-
import tensorcircuit as tc
13-
from tensorcircuit.applications.vags import double_qubits_initial, GHZ_vag, GHZ_vag_tfq
14-
from tensorcircuit.applications.dqas import (
12+
import tyxonq as tq
13+
from tyxonq.applications.vags import double_qubits_initial, GHZ_vag, GHZ_vag_tfq
14+
from tyxonq.applications.dqas import (
1515
set_op_pool,
1616
get_preset,
1717
DQAS_search,
1818
)
1919

20-
tc.set_backend("tensorflow")
20+
tq.set_backend("pytorch")
2121

2222

2323
def main_tn():
2424
"""
25-
DQAS with the tensorcircuit engine backend by TensorNetwork
25+
DQAS with the tyxonq engine backend by TensorNetwork
2626
state preparation example
2727
2828
:return:
@@ -32,13 +32,13 @@ def main_tn():
3232
("ry", 0),
3333
("ry", 1),
3434
("ry", 2),
35-
("CNOT", 0, 1),
36-
("CNOT", 1, 0),
37-
("CNOT", 0, 2),
38-
("CNOT", 2, 0),
39-
("H", 0),
40-
("H", 1),
41-
("H", 2),
35+
("cnot", 0, 1),
36+
("cnot", 1, 0),
37+
("cnot", 0, 2),
38+
("cnot", 2, 0),
39+
("h", 0),
40+
("h", 1),
41+
("h", 2),
4242
]
4343
set_op_pool(ghz_pool)
4444
c = len(ghz_pool)
@@ -51,15 +51,15 @@ def main_tn():
5151
verbose=True,
5252
parallel_num=0,
5353
nnp_initial_value=np.zeros([p, c]),
54-
structure_opt=tf.keras.optimizers.Adam(learning_rate=0.15),
54+
structure_opt=torch.optim.Adam([torch.nn.Parameter(torch.zeros([p, c]))], lr=0.15),
5555
)
56-
preset = get_preset(stp).numpy()
56+
preset = get_preset(stp).detach().cpu().numpy()
5757
GHZ_vag(None, nnp, preset, verbose=True)
5858

5959

6060
def main_tfq():
6161
"""
62-
DQAS with the tensorflow quantum engine.
62+
DQAS with the pytorch quantum engine.
6363
Unitary learning example.
6464
6565
:return:
@@ -84,9 +84,9 @@ def main_tfq():
8484
verbose=False,
8585
parallel_num=0,
8686
nnp_initial_value=np.zeros([p, c]),
87-
structure_opt=tf.keras.optimizers.Adam(learning_rate=0.15),
87+
structure_opt=torch.optim.Adam([torch.nn.Parameter(torch.zeros([p, c]))], lr=0.15),
8888
)
89-
preset = get_preset(stp).numpy()
89+
preset = get_preset(stp).detach().cpu().numpy()
9090
GHZ_vag_tfq(double_qubits_initial().send(None), nnp, preset, verbose=True)
9191

9292

0 commit comments

Comments
 (0)