-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathOPTXC.py
More file actions
134 lines (113 loc) · 4.18 KB
/
OPTXC.py
File metadata and controls
134 lines (113 loc) · 4.18 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
import numpy as np
from scipy.optimize import minimize
from energy_get import EnergyGetter
import jax.numpy as jnp
import jax
class OPTXC:
    """Gradient-descent optimizer for range-separated XC functional
    parameters (omega, alpha, beta).

    The scalar loss comes from ``EnergyGetter.forward()`` for a given
    .mol2 structure; gradients are approximated with forward finite
    differences, so no automatic differentiation through the energy
    code is needed.
    """

    def __init__(self, mol2):
        # Path to the .mol2 structure file consumed by EnergyGetter.
        self.mol2 = mol2
        # Starting guesses for the three functional parameters.
        self.initial_omega = 0.3
        self.initial_alpha = 0.8
        self.initial_beta = 0.2

    def get_loss(self, params):
        """Evaluate the loss at ``params = (omega, alpha, beta)``.

        Builds a fresh EnergyGetter each call; ``forward()`` returns a
        4-tuple whose first element is the scalar loss (the remaining
        three values are discarded here).
        """
        omega, alpha, beta = params
        energy_getter = EnergyGetter(
            self.mol2,
            alpha=alpha,
            beta=beta,
            omega=omega,
        )
        loss, _, _, _ = energy_getter.forward()
        return loss

    def numerical_grad(self, params, eps=1e-5):
        """Forward-difference gradient of the loss w.r.t. each parameter.

        Costs four loss evaluations (baseline plus one perturbation per
        parameter).  Returns a length-3 jnp array ordered
        (d/domega, d/dalpha, d/dbeta).
        """
        omega, alpha, beta = params
        loss0 = self.get_loss(params)
        grad_omega = (self.get_loss((omega + eps, alpha, beta)) - loss0) / eps
        grad_alpha = (self.get_loss((omega, alpha + eps, beta)) - loss0) / eps
        grad_beta = (self.get_loss((omega, alpha, beta + eps)) - loss0) / eps
        return jnp.array([grad_omega, grad_alpha, grad_beta])

    def optimize(self, steps=200, lr=0.001):
        """Plain gradient descent on (omega, alpha, beta).

        Each step takes a gradient step, rounds to 10 decimals, and
        projects the parameters back to non-negative values via abs().
        Returns the final parameters as a length-3 numpy array.
        """
        # Keep all three parameters in a single float array throughout.
        # (The original mixed a tuple of jnp scalars on step 0 with a
        # numpy array afterwards.)
        params = np.array(
            [self.initial_omega, self.initial_alpha, self.initial_beta],
            dtype=float,
        )
        print(params)
        print(f"初始参数: omega={params[0]:.6f}, alpha={params[1]:.6f}, beta={params[2]:.6f}")
        for step in range(steps):
            grads = self.numerical_grad(params)
            # Gradient step; round() replaces the previous
            # float(f"{...:.10f}") string-formatting hack, preserving
            # the 10-decimal truncation it performed.
            new_omega = round(float(params[0] - lr * grads[0]), 10)
            new_alpha = round(float(params[1] - lr * grads[1]), 10)
            new_beta = round(float(params[2] - lr * grads[2]), 10)
            # abs() keeps the parameters in the valid (>= 0) range.
            params = np.array([abs(new_omega), abs(new_alpha), abs(new_beta)])
            # NOTE(review): this is a 5th full loss evaluation per step,
            # used only for logging — expensive if forward() runs a DFT.
            loss = self.get_loss(params)
            print(f"步骤 {step+1}: 损失={loss:.6f}, "
                  f"omega={params[0]:.6f}, alpha={params[1]:.6f}, beta={params[2]:.6f}")
        return params
def cam_b3lyp_tddft_example():
    """Minimal PySCF demo: CAM-B3LYP optimization followed by TDDFT.

    Workflow for a water molecule in the def2-svp basis:
      1. Run a CAM-B3LYP SCF calculation.
      2. Relax the geometry with PySCF's geomeTRIC-based optimizer.
      3. Run TDDFT on the relaxed wavefunction and print the lowest
         excitation energies with their oscillator strengths.

    Returns the optimized mean-field result and the TDDFT object.

    Example:
        >>> from OPTXC import cam_b3lyp_tddft_example
        >>> cam_b3lyp_tddft_example()
    """
    from pyscf import gto, dft, tddft
    from pyscf.geomopt.geometric_solver import optimize

    water = gto.M(
        atom="""
    O 0.000000 0.000000 0.000000
    H 0.000000 -0.757000 0.587000
    H 0.000000 0.757000 0.587000
    """,
        basis="def2-svp",
        unit="Angstrom",
        verbose=4,
    )

    # CAM-B3LYP single point, then geometry relaxation.
    scf_calc = dft.RKS(water, xc="cam-b3lyp")
    scf_calc.kernel()
    relaxed = optimize(scf_calc)
    print("优化后几何 (Å):")
    print(relaxed.mol.atom_coords())
    print("优化后能量 (Ha):", relaxed.e_tot)

    # TDDFT for the lowest few excited states of the relaxed structure.
    td_calc = tddft.TDDFT(relaxed)
    td_calc.nstates = 5
    td_calc.kernel()

    hartree_to_ev = 27.211386245988
    strengths = td_calc.oscillator_strength()
    for state, (energy, strength) in enumerate(zip(td_calc.e, strengths), start=1):
        print(
            f"态 {state}: 激发能 = {energy * hartree_to_ev:.4f} eV, "
            f"振子强度 f = {strength:.4f}"
        )
    return relaxed, td_calc
if __name__ == '__main__':
    # Molecular structure used for fitting the functional parameters.
    mol2_path = '/home/yjiao/DeepRSH/module/net.mol2'
    optimizer = OPTXC(mol2_path)

    # Report the loss at the starting (omega, alpha, beta) values.
    start_params = (
        jnp.array(optimizer.initial_omega),
        jnp.array(optimizer.initial_alpha),
        jnp.array(optimizer.initial_beta),
    )
    print('初始损失:', optimizer.get_loss(start_params))

    print("\n开始优化:")
    final_params = optimizer.optimize(steps=100, lr=0.5)

    # To run only the PySCF CAM-B3LYP + TDDFT example instead, use:
    # cam_b3lyp_tddft_example()