
Commit 5c24f07

update code
1 parent 64eedf7

File tree: 8 files changed, +52 -94 lines


examples/NLS-MB/NLS-MB_optical_rogue_wave.py (-1)

@@ -258,7 +258,6 @@ def train(cfg: DictConfig):
         epochs=EPOCHS,
         equation=equation,
         validator=validator,
-        cfg=cfg,
     )
     # train model
     solver.train()
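
For context, a minimal sketch of what the Solver construction looks like after this change. Only the lines from epochs=EPOCHS onward appear in this diff; the preceding arguments (model, constraint, optimizer) are assumptions based on the surrounding train() function.

solver = ppsci.solver.Solver(
    model,                # assumed: network built earlier in train()
    constraint,           # assumed: training constraints
    optimizer=optimizer,  # assumed: optimizer created earlier
    epochs=EPOCHS,
    equation=equation,
    validator=validator,
)
# train model
solver.train()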

examples/ldc/conf/ldc2d_modulus_importance_sampling.yaml (-69)

This file was deleted.

examples/lorenz/train_enn.py (+17 -2)

@@ -18,13 +18,15 @@
 
 # This file is for step1: training a embedding model.
 # This file is based on PaddleScience/ppsci API.
+from os import path as osp
 
 import hydra
 import numpy as np
 import paddle
 from omegaconf import DictConfig
 
 import ppsci
+from ppsci.utils import logger
 
 
 def get_mean_std(data: np.ndarray):
@@ -38,6 +40,11 @@ def get_mean_std(data: np.ndarray):
 
 
 def train(cfg: DictConfig):
+    # set random seed for reproducibility
+    ppsci.utils.misc.set_random_seed(cfg.seed)
+    # initialize logger
+    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
+
     weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e4 * cfg.TRAIN_BLOCK_SIZE)
     regularization_key = "k_matrix"
     # manually build constraint(s)
@@ -130,9 +137,12 @@ def train(cfg: DictConfig):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        optimizer=optimizer,
+        cfg.output_dir,
+        optimizer,
+        epochs=cfg.TRAIN.epochs,
+        iters_per_epoch=ITERS_PER_EPOCH,
+        eval_during_train=True,
         validator=validator,
-        cfg=cfg,
     )
     # train model
     solver.train()
@@ -141,6 +151,11 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
+    # set random seed for reproducibility
+    ppsci.utils.misc.set_random_seed(cfg.seed)
+    # initialize logger
+    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
+
     weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e4 * cfg.TRAIN_BLOCK_SIZE)
     regularization_key = "k_matrix"
     # manually build constraint(s)
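
The same two-line preamble is added to both train() and evaluate(). As a standalone reference, a minimal sketch of that pattern; init_run is a hypothetical helper name (the script inlines the calls), and cfg is assumed to be the Hydra config with seed, output_dir, and mode fields.

from os import path as osp

import ppsci
from ppsci.utils import logger


def init_run(cfg):
    # fix the random seed so repeated runs are reproducible
    ppsci.utils.misc.set_random_seed(cfg.seed)
    # route "ppsci" log messages to <output_dir>/<mode>.log at the "info" level
    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")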

examples/lorenz/train_transformer.py (+14 -2)

@@ -18,6 +18,7 @@
 
 # This file is for step2: training a transformer model, based on frozen pretrained embedding model.
 # This file is based on PaddleScience/ppsci API.
+from os import path as osp
 from typing import Dict
 
 import hydra
@@ -26,6 +27,7 @@
 
 import ppsci
 from ppsci.arch import base
+from ppsci.utils import logger
 from ppsci.utils import save_load
 
 
@@ -55,6 +57,9 @@ def train(cfg: DictConfig):
     # valid time-series: 64 time-steps: 1024 block-size: 256 stride: 1024
     # test time-series: 256 time-steps: 1024
     # set random seed for reproducibility
+    ppsci.utils.misc.set_random_seed(cfg.seed)
+    # initialize logger
+    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
 
     embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH)
     output_transform = OutputTransform(embedding_model)
@@ -147,10 +152,14 @@ def train(cfg: DictConfig):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        optimizer=optimizer,
+        cfg.output_dir,
+        optimizer,
+        epochs=cfg.TRAIN.epochs,
+        iters_per_epoch=ITERS_PER_EPOCH,
+        eval_during_train=cfg.TRAIN.eval_during_train,
+        eval_freq=cfg.TRAIN.eval_freq,
         validator=validator,
         visualizer=visualizer,
-        cfg=cfg,
     )
     # train model
     solver.train()
@@ -161,6 +170,9 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
+    # directly evaluate pretrained model(optional)
+    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
+
     embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH)
     output_transform = OutputTransform(embedding_model)
 
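Note the argument order: judging from this call, Solver's positional parameters after model and constraint are output_dir and then optimizer, which is why cfg.output_dir and optimizer carry no keyword. A minimal annotated sketch of the refactored call; model, constraint, optimizer, validator, visualizer, and ITERS_PER_EPOCH are assumed to be built earlier in train() as in the rest of the script.

solver = ppsci.solver.Solver(
    model,
    constraint,
    cfg.output_dir,                   # positional: where checkpoints and logs go
    optimizer,                        # positional: optimizer instance
    epochs=cfg.TRAIN.epochs,
    iters_per_epoch=ITERS_PER_EPOCH,
    eval_during_train=cfg.TRAIN.eval_during_train,
    eval_freq=cfg.TRAIN.eval_freq,    # how often to evaluate while training
    validator=validator,
    visualizer=visualizer,
)
solver.train()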

examples/nsfnet/conf/VP_NSFNet4.yaml (+15 -15)

@@ -29,40 +29,40 @@ hydra:
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
-seed: 1234
+
+mode: train
 output_dir: ${hydra:run.dir}
+seed: 1234
 data_dir: ./data/
-log_freq: 20
+log_freq: 5000
+ntrain: 11333
+nb_train: 2952
+n0_train: 986
+alpha: 100
+beta: 100
+re: 999.35
+
 MODEL:
   input_keys: ["x", "y","z","t"]
   output_keys: ["u", "v", "w","p"]
   num_layers: 10
   hidden_size: 300
   activation: "tanh"
   weight_norm: True
-mode: train
-ntrain: 11333
-nb_train: 2952
-n0_train: 986
-alpha: 100
-beta: 100
-re: 999.35
-epochs: 15250
 TRAIN:
-  log_freq: 5000
+  epochs: 15250
   eval_freq: 5000
   save_freq: 5000
-  eval_with_no_grad: true
+  iters_per_epoch: 150
   lr_scheduler:
-    epochs: 15250
+    epochs: ${TRAIN.epochs}
     decay_epochs: [250, 4500, 5000, 5500]
-    iters_per_epoch: 150
     values: [1e-3, 1e-4, 1e-5, 1e-6, 1e-7]
+
 EVAL:
   pretrained_model_path: null
   eval_with_no_grad: true
 
-
 INFER:
   pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/nsfnet/nsfnet4.pdparams
   export_path: ./inference/VP_NSFNet4
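
One detail worth noting: lr_scheduler.epochs now uses the OmegaConf interpolation ${TRAIN.epochs}, so the scheduler always tracks TRAIN.epochs instead of repeating the literal 15250. A minimal, self-contained sketch of how that interpolation resolves (values taken from this config):

from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {
        "TRAIN": {
            "epochs": 15250,
            "lr_scheduler": {"epochs": "${TRAIN.epochs}"},
        }
    }
)
# the interpolation is resolved against the root config on access
print(cfg.TRAIN.lr_scheduler.epochs)  # 15250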

examples/pipe/poiseuille_flow.py (+1)

@@ -140,6 +140,7 @@ def output_trans_p(input, out):
     solver = ppsci.solver.Solver(
         model,
         constraint,
+        iters_per_epoch=ITERS_PER_EPOCH,
         optimizer=optimizer,
         equation=equation,
         cfg=cfg,

examples/tempoGAN/conf/tempogan.yaml (+2 -2)

@@ -38,9 +38,10 @@ output_dir: ${hydra:run.dir}
 log_freq: 20
 DATASET_PATH: ./datasets/tempoGAN/2d_train.mat
 DATASET_PATH_VALID: ./datasets/tempoGAN/2d_valid.mat
+use_amp: true
+amp_level: O2
 
 # set working condition
-USE_AMP: true
 USE_SPATIALDISC: true
 USE_TEMPODISC: true
 WEIGHT_GEN: [5.0, 0.0, 1.0] # lambda_l1, lambda_l2, lambda_t
@@ -96,7 +97,6 @@ TRAIN:
     gamma: 0.05
     by_epoch: true
   eval_during_train: false
-  amp_level: O2
   pretrained_model_path: null
   checkpoint_path: null
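The AMP switches move from USE_AMP (working-condition block) and TRAIN.amp_level to lowercase top-level keys, alongside the other cfg-driven settings such as output_dir and log_freq. If they ever need to be forwarded explicitly rather than read from the config, a hedged sketch, assuming Solver accepts use_amp/amp_level keyword arguments as in earlier ppsci releases; model_list, constraint_gen, and optimizer_gen are taken from tempoGAN.py.

solver_gen = ppsci.solver.Solver(
    model_list,
    constraint_gen,
    optimizer=optimizer_gen,
    use_amp=cfg.use_amp,      # relocated key: top-level use_amp
    amp_level=cfg.amp_level,  # relocated key: top-level amp_level (O2)
)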

examples/tempoGAN/tempoGAN.py (+3 -3)

@@ -223,21 +223,21 @@ def train(cfg: DictConfig):
     solver_gen = ppsci.solver.Solver(
         model_list,
         constraint_gen,
-        optimizer_gen,
+        optimizer=optimizer_gen,
         cfg=cfg,
     )
     if cfg.USE_SPATIALDISC:
         solver_disc = ppsci.solver.Solver(
             model_list,
             constraint_disc,
-            optimizer_disc,
+            optimizer=optimizer_disc,
             cfg=cfg,
         )
     if cfg.USE_TEMPODISC:
         solver_disc_tempo = ppsci.solver.Solver(
             model_list,
             constraint_disc_tempo,
-            optimizer_disc_tempo,
+            optimizer=optimizer_disc_tempo,
             cfg=cfg,
         )
243243
