1+ """
2+ Quantum part in PyTorch, neural part in PyTorch, both on GPU
3+ Hybrid quantum-classical pipeline demonstration
4+ """
5+
6+ import os
7+ import time
8+ import numpy as np
9+ import torch
10+ import torchvision
11+ import tyxonq as tq
12+
# Select the computation device: prefer CUDA when available, else fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

# Route tyxonq's numerics through the PyTorch backend so quantum and neural
# parts share tensors (and the GPU) without conversion.
K = tq.set_backend("pytorch")
22+
# Download MNIST (train and test splits) via torchvision.
_to_tensor = torchvision.transforms.ToTensor()
train_dataset = torchvision.datasets.MNIST(
    root='./data', train=True, download=True, transform=_to_tensor
)
test_dataset = torchvision.datasets.MNIST(
    root='./data', train=False, download=True, transform=_to_tensor
)

# Extract the underlying tensors as NumPy arrays so the preprocessing
# below can stay in plain NumPy.
x_train, y_train = train_dataset.data.numpy(), train_dataset.targets.numpy()
x_test, y_test = test_dataset.data.numpy(), test_dataset.targets.numpy()

# Add a trailing channel axis and scale pixel values into [0, 1].
x_train = x_train[..., np.newaxis] / 255.0
45+
def filter_pair(x, y, a, b):
    """Keep only samples labelled *a* or *b*; relabel as boolean (label == a)."""
    mask = np.logical_or(y == a, y == b)
    return x[mask], y[mask] == a
52+
# Binary classification task: digits 1 vs 5 only.
x_train, y_train = filter_pair(x_train, y_train, 1, 5)

# Downsample each 28x28 image to 3x3 with bilinear interpolation.
# interpolate expects NCHW, so permute in and back out, then drop to NumPy.
import torch.nn.functional as F

_nchw = torch.tensor(x_train).permute(0, 3, 1, 2)
x_train_small = (
    F.interpolate(_nchw, size=(3, 3), mode='bilinear', align_corners=False)
    .permute(0, 2, 3, 1)
    .numpy()
)

# Binarize pixels and flatten each 3x3 image into a length-9 vector.
x_train_bin = (x_train_small > 0.5).astype(np.float32)
x_train_bin = np.squeeze(x_train_bin).reshape([-1, 9])

# Move the training tensors to the chosen device.
x_train_torch = torch.tensor(x_train_bin).to(device=device)
y_train_torch = torch.tensor(y_train, dtype=torch.float32).to(device=device)

n = 9        # number of qubits — one per binarized pixel
nlayers = 3  # depth of the variational circuit
73+
# Variational quantum circuit used as the feature extractor (PyTorch backend).
def qpreds(x, weights):
    """Encode x with RX rotations, apply `nlayers` entangle+rotate layers,
    and return the real Pauli-Z expectation value of each qubit."""
    circuit = tq.Circuit(n)

    # Data encoding: one RX rotation per qubit, angle taken from the input.
    for q in range(n):
        circuit.rx(q, theta=x[q])

    # Variational layers: a CNOT ladder for entanglement followed by
    # per-qubit RX/RY rotations parameterized by `weights`.
    for layer in range(nlayers):
        for q in range(n - 1):
            circuit.cnot(q, q + 1)
        for q in range(n):
            circuit.rx(q, theta=weights[2 * layer, q])
            circuit.ry(q, theta=weights[2 * layer + 1, q])

    # <Z_i> on each qubit, stacked into a length-n feature vector.
    expectations = [K.real(circuit.expectation_ps(z=[i])) for i in range(n)]
    return K.stack(expectations)
88+
# Wrap the quantum function as a torch.nn.Module so it composes with nn layers.
quantumnet = tq.TorchLayer(
    qpreds,
    weights_shape=[2 * nlayers, n],  # RX and RY angles for every layer/qubit
    use_vmap=True,
    use_interface=True,
    use_jit=True,
    enable_dlpack=True,  # Enable DLPack for efficient data transfer
)

# Hybrid model: quantum features -> linear readout -> sigmoid probability.
model = torch.nn.Sequential(
    quantumnet,
    torch.nn.Linear(9, 1),
    torch.nn.Sigmoid(),
)
model = model.to(device=device)
101+
criterion = torch.nn.BCELoss()
opt = torch.optim.Adam(model.parameters(), lr=1e-2)
nepochs = 300
nbatch = 32
times = []

# Bug fix: minibatch indices were drawn from randint(low=0, high=100), so
# only the first 100 samples of the filtered dataset were ever trained on.
# Sample over the full dataset instead.
ntrain = x_train_torch.shape[0]

for epoch in range(nepochs):
    # Random minibatch (with replacement) from the whole training set.
    index = np.random.randint(low=0, high=ntrain, size=nbatch)
    inputs, labels = x_train_torch[index], y_train_torch[index]
    opt.zero_grad()

    with torch.set_grad_enabled(True):
        time0 = time.time()
        yps = model(inputs)
        # BCELoss expects matching (nbatch, 1) shapes for preds and targets.
        loss = criterion(
            torch.reshape(yps, [nbatch, 1]), torch.reshape(labels, [nbatch, 1])
        )
        loss.backward()
        if epoch % 100 == 0:
            print(loss)
        opt.step()
        time1 = time.time()
    times.append(time1 - time0)

# Skip times[0]: the first step includes one-off JIT/compilation warm-up.
print("Training time per step: ", np.mean(times[1:]))