forked from jwsiegel2510/consistent-PINNs
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpoisson-3d-gd.py
More file actions
76 lines (59 loc) · 3.17 KB
/
poisson-3d-gd.py
File metadata and controls
76 lines (59 loc) · 3.17 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
# Author: Jonathan Siegel
#
# Tests the consistent PINNs on a 3d elliptic problem using gradient descent to train.
import math
import jax.numpy as jnp
from jax import random
from implementation.utils import plot_values
from implementation.experiments import generate_3d_poisson_experiment
from implementation.networks import ResidualReLUkNetwork
from implementation.loss_functions import OriginalPoissonPINNsLoss, ConsistentPoissonPINNsLoss
from implementation.optimization import rgd_train
### Tested numbers of collocation points in each direction and along the boundary.
Nlist = [10, 20, 30]
### Number of points in each direction for plotting and for calculating the error.
Ntest = 100
### Neural Network and training parameters
width = 100                # hidden-layer width of the residual ReLU^k network
depth = 8                  # number of layers passed to init_deep_network_params
step_size = 0.001          # initial gradient-descent step size
momentum = 0.9             # momentum parameter for rgd_train
decrease_interval = 4000   # steps between step-size decreases during training
step_count = 40000         # total number of optimization steps
def train_and_test(N, Ntest, exp_type, step_size, momentum, loss_type, step_count, decrease_interval,
                   net_width=None, net_depth=None):
  """Train a PINN on a 3d Poisson problem and return its relative H1 error.

  Args:
    N: number of collocation points in each direction and along the boundary.
    Ntest: number of points in each direction of the test/evaluation grid.
    exp_type: experiment identifier forwarded to generate_3d_poisson_experiment.
    step_size: initial step size for gradient-descent training.
    momentum: momentum parameter for the optimizer.
    loss_type: 'original', 'original-weighted', or 'consistent-l2'; any other
      value selects the consistent loss with gamma = 6/5.
    step_count: total number of optimization steps.
    decrease_interval: number of steps between step-size decreases.
    net_width: hidden-layer width; defaults to the module-level `width`.
    net_depth: network depth; defaults to the module-level `depth`.

  Returns:
    The relative H1 error of the trained network on the test grid.
  """
  # Fall back to the module-level architecture settings for backward
  # compatibility with existing callers that do not pass these arguments.
  if net_width is None:
    net_width = width
  if net_depth is None:
    net_depth = depth
  # Initialize the network randomly (fixed PRNG key, so runs are reproducible).
  network = ResidualReLUkNetwork()
  params = network.init_deep_network_params(3, net_width, net_depth, random.PRNGKey(0))
  # Generate collocation/boundary points, the test grid, and the exact solution.
  coords, bdy_coords, coords_test, rhs_data, bdy_data, sol, sol_grads = generate_3d_poisson_experiment(N, Ntest, exp_type)
  # Create the loss function selected by loss_type.
  if loss_type == 'original':
    loss = OriginalPoissonPINNsLoss(coords, bdy_coords, rhs_data, bdy_data)
  elif loss_type == 'original-weighted':
    # Weight the boundary term by N to match the interior collocation density.
    loss = OriginalPoissonPINNsLoss(coords, bdy_coords, rhs_data, bdy_data, bdy_weight = N)
  elif loss_type == 'consistent-l2':
    loss = ConsistentPoissonPINNsLoss(coords, bdy_coords, rhs_data, bdy_data, 2.0)
  else:
    # Consistent loss with gamma = 6/5. NOTE(review): the original comment said
    # this value is "in 2d", but this script solves a 3d problem — confirm the
    # exponent is the intended one for 3d.
    loss = ConsistentPoissonPINNsLoss(coords, bdy_coords, rhs_data, bdy_data, 6.0/5.0)
  # Train the network.
  params = rgd_train(params, network, loss, step_size, momentum, step_count, decrease_interval, verbose=True)
  # Evaluate the trained network and its gradients on the test grid.
  nn_sol = network.batched_predict(params, coords_test)
  nn_grads = network.batched_grad_predict(params, coords_test)
  # Relative H1-type error: sum of a gradient (Frobenius) norm and a value norm.
  # The (1/Ntest) quadrature factors cancel in the final ratio.
  solution_norm = (1.0/Ntest)*jnp.linalg.norm(sol_grads, 'fro') + (1.0/Ntest)*jnp.linalg.norm(sol)
  error = (1.0/Ntest)*jnp.linalg.norm(sol_grads - nn_grads, 'fro') + (1.0/Ntest)*jnp.linalg.norm(nn_sol - sol)
  return error / solution_norm
### Run the experiments.
experiment = 'smooth'
# Each loss variant paired with the report template for its relative error.
loss_runs = [
    ('original', 'Using the original loss gives a relative error of: %lf'),
    ('original-weighted', 'Using the weighted original loss gives a relative error of: %lf'),
    ('consistent', 'Using the consistent loss gives a relative error of: %lf'),
    ('consistent-l2', 'Using the consistent loss with L2 gives a relative error of: %lf'),
]
for N in Nlist:
  print('Number of collocation points in each direction: %d' % N)
  # Train and report every loss variant at this collocation density.
  for loss_type, template in loss_runs:
    error = train_and_test(N, Ntest, experiment, step_size, momentum, loss_type, step_count, decrease_interval)
    print(template % error)