diff --git a/docs/zh/examples/amgnet.md b/docs/zh/examples/amgnet.md
index 06647cc21..dcb8e904e 100644
--- a/docs/zh/examples/amgnet.md
+++ b/docs/zh/examples/amgnet.md
@@ -103,17 +103,17 @@ unzip data.zip
 
 === "airfoil"
 
-    ``` py linenums="61"
+    ``` py linenums="55"
     --8<--
-    examples/amgnet/amgnet_airfoil.py:61:62
+    examples/amgnet/amgnet_airfoil.py:55:56
     --8<--
     ```
 
 === "cylinder"
 
-    ``` py linenums="61"
+    ``` py linenums="55"
     --8<--
-    examples/amgnet/amgnet_cylinder.py:61:62
+    examples/amgnet/amgnet_cylinder.py:55:56
    --8<--
    ```
diff --git a/examples/NLS-MB/NLS-MB_optical_rogue_wave.py b/examples/NLS-MB/NLS-MB_optical_rogue_wave.py
index 939e88667..0b860fbf9 100644
--- a/examples/NLS-MB/NLS-MB_optical_rogue_wave.py
+++ b/examples/NLS-MB/NLS-MB_optical_rogue_wave.py
@@ -233,15 +233,10 @@ def train(cfg: DictConfig):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        epochs=cfg.TRAIN.epochs,
-        iters_per_epoch=cfg.TRAIN.iters_per_epoch,
-        eval_during_train=cfg.TRAIN.eval_during_train,
-        eval_freq=cfg.TRAIN.eval_freq,
+        optimizer=optimizer,
         equation=equation,
-        geom=geom,
         validator=validator,
+        cfg=cfg,
     )
     # train model
     solver.train()
@@ -259,14 +254,12 @@ def train(cfg: DictConfig):
         model,
         constraint,
         OUTPUT_DIR,
-        optimizer_lbfgs,
-        None,
-        EPOCHS,
-        cfg.TRAIN.lbfgs.iters_per_epoch,
+        optimizer=optimizer_lbfgs,
+        epochs=EPOCHS,
+        iters_per_epoch=cfg.TRAIN.lbfgs.iters_per_epoch,
         eval_during_train=cfg.TRAIN.lbfgs.eval_during_train,
         eval_freq=cfg.TRAIN.lbfgs.eval_freq,
         equation=equation,
-        geom=geom,
         validator=validator,
     )
     # train model
@@ -341,12 +334,9 @@ def evaluate(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        output_dir=cfg.output_dir,
-        eval_freq=cfg.TRAIN.eval_freq,
         equation=equation,
-        geom=geom,
         validator=validator,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
+        cfg=cfg,
     )
 
     solver.eval()
@@ -374,7 +364,7 @@ def export(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        pretrained_model_path=cfg.INFER.pretrained_model_path,
+        cfg=cfg,
     )
     # export model
     from paddle.static import InputSpec
diff --git a/examples/NLS-MB/NLS-MB_optical_soliton.py b/examples/NLS-MB/NLS-MB_optical_soliton.py
index 14a5a5d72..fb2d804f5 100644
--- a/examples/NLS-MB/NLS-MB_optical_soliton.py
+++ b/examples/NLS-MB/NLS-MB_optical_soliton.py
@@ -210,15 +210,10 @@ def train(cfg: DictConfig):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        epochs=cfg.TRAIN.epochs,
-        iters_per_epoch=cfg.TRAIN.iters_per_epoch,
-        eval_during_train=cfg.TRAIN.eval_during_train,
-        eval_freq=cfg.TRAIN.eval_freq,
+        optimizer=optimizer,
         equation=equation,
-        geom=geom,
         validator=validator,
+        cfg=cfg,
     )
     # train model
     solver.train()
@@ -237,13 +232,11 @@ def train(cfg: DictConfig):
         constraint,
         OUTPUT_DIR,
         optimizer_lbfgs,
-        None,
-        EPOCHS,
-        cfg.TRAIN.lbfgs.iters_per_epoch,
+        epochs=EPOCHS,
+        iters_per_epoch=cfg.TRAIN.lbfgs.iters_per_epoch,
         eval_during_train=cfg.TRAIN.lbfgs.eval_during_train,
         eval_freq=cfg.TRAIN.lbfgs.eval_freq,
         equation=equation,
-        geom=geom,
         validator=validator,
     )
     # train model
@@ -318,12 +311,9 @@ def evaluate(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        output_dir=cfg.output_dir,
-        eval_freq=cfg.TRAIN.eval_freq,
         equation=equation,
-        geom=geom,
         validator=validator,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
+        cfg=cfg,
     )
 
     solver.eval()
@@ -351,7 +341,7 @@ def export(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        pretrained_model_path=cfg.INFER.pretrained_model_path,
+        cfg=cfg,
     )
     # export model
     from paddle.static import InputSpec
diff --git a/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml b/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml
index b6a97d772..98764d890 100644
--- a/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml
+++ b/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,15 +15,6 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
   callbacks:
     init_callback:
       _target_: ppsci.utils.callbacks.InitCallback
diff --git a/examples/NLS-MB/conf/NLS-MB_soliton.yaml b/examples/NLS-MB/conf/NLS-MB_soliton.yaml
index 60a7c049d..94eabfdff 100644
--- a/examples/NLS-MB/conf/NLS-MB_soliton.yaml
+++ b/examples/NLS-MB/conf/NLS-MB_soliton.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,15 +15,6 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
   callbacks:
     init_callback:
       _target_: ppsci.utils.callbacks.InitCallback
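The change repeated across the Python files in this patch is visible in the two examples above: run-level settings (output_dir, epochs, iters_per_epoch, eval options, checkpoint and pretrained-model paths) are no longer threaded through ppsci.solver.Solver one argument at a time; the Hydra config is handed over once via cfg and the solver picks those fields up from it. A minimal sketch of the resulting call, assuming Solver falls back to the corresponding cfg entries for any argument not passed explicitly (which is what these diffs rely on):

    solver = ppsci.solver.Solver(
        model,
        constraint,
        optimizer=optimizer,  # objects built in the script are still passed directly
        equation=equation,
        validator=validator,
        cfg=cfg,  # supplies output_dir, epochs, iters_per_epoch, eval_freq, paths, ...
    )
    solver.train()

Where a stage deviates from the config defaults, the value is still given explicitly (see the biharmonic2d L-BFGS stage later in this patch, which passes epochs=1 and iters_per_epoch=1 alongside cfg=cfg).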
diff --git a/examples/RegAE/RegAE.py b/examples/RegAE/RegAE.py
index 455290f93..89347714e 100644
--- a/examples/RegAE/RegAE.py
+++ b/examples/RegAE/RegAE.py
@@ -14,23 +14,15 @@
 
 from __future__ import annotations
 
-from os import path as osp
-
 import hydra
 import paddle
 from omegaconf import DictConfig
 from paddle.nn import functional as F
 
 import ppsci
-from ppsci.utils import logger
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info")
-
     # set model
     model = ppsci.arch.AutoEncoder(**cfg.MODEL)
@@ -99,16 +91,9 @@ def loss_expr(output_dict, label_dict, weight_dict=None):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        None,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        save_freq=cfg.TRAIN.save_freq,
-        eval_during_train=cfg.TRAIN.eval_during_train,
-        eval_freq=cfg.TRAIN.eval_freq,
+        optimizer=optimizer,
         validator=validator,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
+        cfg=cfg,
     )
     # train model
     solver.train()
@@ -117,11 +102,6 @@ def loss_expr(output_dict, label_dict, weight_dict=None):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info")
-
     # set model
     model = ppsci.arch.AutoEncoder(**cfg.MODEL)
@@ -151,11 +131,8 @@ def evaluate(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        None,
-        output_dir=cfg.output_dir,
         validator=validator,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
+        cfg=cfg,
     )
     # evaluate after finished training
     solver.eval()
diff --git a/examples/RegAE/conf/RegAE.yaml b/examples/RegAE/conf/RegAE.yaml
index 0da588184..2f13c79cb 100644
--- a/examples/RegAE/conf/RegAE.yaml
+++ b/examples/RegAE/conf/RegAE.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,15 +15,9 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/allen_cahn/allen_cahn_causal.py b/examples/allen_cahn/allen_cahn_causal.py
index 58e6c798e..55c734c83 100644
--- a/examples/allen_cahn/allen_cahn_causal.py
+++ b/examples/allen_cahn/allen_cahn_causal.py
@@ -158,22 +158,9 @@ def gen_label_batch(input_batch):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        lr_scheduler,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        save_freq=cfg.TRAIN.save_freq,
-        log_freq=cfg.log_freq,
-        eval_during_train=True,
-        eval_freq=cfg.TRAIN.eval_freq,
-        seed=cfg.seed,
+        optimizer=optimizer,
         equation=equation,
         validator=validator,
-        pretrained_model_path=cfg.TRAIN.pretrained_model_path,
-        checkpoint_path=cfg.TRAIN.checkpoint_path,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
-        use_tbd=True,
         cfg=cfg,
     )
     # train model
@@ -222,11 +209,8 @@ def evaluate(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        output_dir=cfg.output_dir,
-        log_freq=cfg.log_freq,
         validator=validator,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
+        cfg=cfg,
     )
 
     # evaluate after finished training
@@ -248,7 +232,7 @@ def export(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        pretrained_model_path=cfg.INFER.pretrained_model_path,
+        cfg=cfg,
     )
     # export model
     from paddle.static import InputSpec
diff --git a/examples/allen_cahn/allen_cahn_plain.py b/examples/allen_cahn/allen_cahn_plain.py
index 39a07b122..f13af182c 100644
--- a/examples/allen_cahn/allen_cahn_plain.py
+++ b/examples/allen_cahn/allen_cahn_plain.py
@@ -156,22 +156,9 @@ def gen_label_batch(input_batch):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        lr_scheduler,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        save_freq=cfg.TRAIN.save_freq,
-        log_freq=cfg.log_freq,
-        eval_during_train=True,
-        eval_freq=cfg.TRAIN.eval_freq,
-        seed=cfg.seed,
+        optimizer=optimizer,
         equation=equation,
         validator=validator,
-        pretrained_model_path=cfg.TRAIN.pretrained_model_path,
-        checkpoint_path=cfg.TRAIN.checkpoint_path,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
-        use_tbd=True,
         cfg=cfg,
     )
     # train model
@@ -220,11 +207,8 @@ def evaluate(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        output_dir=cfg.output_dir,
-        log_freq=cfg.log_freq,
         validator=validator,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
+        cfg=cfg,
     )
 
     # evaluate after finished training
@@ -246,7 +230,7 @@ def export(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        pretrained_model_path=cfg.INFER.pretrained_model_path,
+        cfg=cfg,
     )
     # export model
     from paddle.static import InputSpec
diff --git a/examples/allen_cahn/conf/allen_cahn.yaml b/examples/allen_cahn/conf/allen_cahn.yaml
index c4cf52253..4fbc18617 100644
--- a/examples/allen_cahn/conf/allen_cahn.yaml
+++ b/examples/allen_cahn/conf/allen_cahn.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,16 +15,6 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - INFER.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
   callbacks:
     init_callback:
       _target_: ppsci.utils.callbacks.InitCallback
diff --git a/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml b/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml
index afd457a29..4e577f150 100644
--- a/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml
+++ b/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,16 +15,6 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - INFER.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
   callbacks:
     init_callback:
       _target_: ppsci.utils.callbacks.InitCallback
diff --git a/examples/allen_cahn/conf/allen_cahn_defalut_ntk.yaml b/examples/allen_cahn/conf/allen_cahn_defalut_ntk.yaml
index 0a4eac375..7fe499eed 100644
--- a/examples/allen_cahn/conf/allen_cahn_defalut_ntk.yaml
+++ b/examples/allen_cahn/conf/allen_cahn_defalut_ntk.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,16 +15,6 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working direcotry unchaned
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - INFER.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
   callbacks:
     init_callback:
       _target_: ppsci.utils.callbacks.InitCallback
diff --git a/examples/allen_cahn/conf/allen_cahn_default.yaml b/examples/allen_cahn/conf/allen_cahn_default.yaml
index 4ca236db7..b5c451de0 100644
--- a/examples/allen_cahn/conf/allen_cahn_default.yaml
+++ b/examples/allen_cahn/conf/allen_cahn_default.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,16 +15,6 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - INFER.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
   callbacks:
     init_callback:
       _target_: ppsci.utils.callbacks.InitCallback
diff --git a/examples/allen_cahn/conf/allen_cahn_sota.yaml b/examples/allen_cahn/conf/allen_cahn_sota.yaml
index 37a0ca140..207c87d97 100644
--- a/examples/allen_cahn/conf/allen_cahn_sota.yaml
+++ b/examples/allen_cahn/conf/allen_cahn_sota.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,16 +15,6 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working direcotry unchaned
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - INFER.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
   callbacks:
     init_callback:
       _target_: ppsci.utils.callbacks.InitCallback
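Every config touched by this patch gets the same two edits, shown again above: a Hydra defaults list layering the example's settings on top of shared base configs (ppsci_default, train_default, ema_default, swa_default, eval_default, infer_default, plus a shared exclude_keys_default that replaces the per-file override_dirname exclude list), and an InitCallback registered under hydra.callbacks where it was not already present. That callback is what replaces the set_random_seed/init_logger boilerplate deleted from each train and evaluate function. A rough sketch of what such a callback can look like — the class name comes from these configs, the hook name from Hydra's experimental Callback interface, and the body is inferred from the deleted lines rather than copied from ppsci:

    from os import path as osp

    from hydra.experimental.callback import Callback
    from omegaconf import DictConfig

    import ppsci
    from ppsci.utils import logger


    class InitCallback(Callback):
        """Sketch: run once before each job to seed RNGs and set up logging."""

        def on_job_start(self, config: DictConfig, **kwargs) -> None:
            ppsci.utils.misc.set_random_seed(config.seed)
            logger.init_logger(
                "ppsci", osp.join(config.output_dir, f"{config.mode}.log"), "info"
            )

With the callback registered in YAML, deleting the per-script boilerplate does not change behavior: seeding and logger setup still happen once per run, before the train/evaluate body executes.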
diff --git a/examples/amgnet/amgnet_airfoil.py b/examples/amgnet/amgnet_airfoil.py
index 7bcf9f854..f527da297 100644
--- a/examples/amgnet/amgnet_airfoil.py
+++ b/examples/amgnet/amgnet_airfoil.py
@@ -14,7 +14,6 @@
 
 from __future__ import annotations
 
-from os import path as osp
 from typing import TYPE_CHECKING
 from typing import Dict
 from typing import List
@@ -53,11 +52,6 @@ def eval_rmse_func(
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info")
-
     # set airfoil model
     model = ppsci.arch.AMGNet(**cfg.MODEL)
@@ -76,7 +70,6 @@ def train(cfg: DictConfig):
             "drop_last": False,
             "shuffle": True,
         },
-        "num_workers": 1,
     }
 
     # set constraint
@@ -102,11 +95,6 @@ def train(cfg: DictConfig):
             "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH,
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     rmse_validator = ppsci.validate.SupervisedValidator(
         eval_dataloader_cfg,
@@ -121,16 +109,9 @@ def train(cfg: DictConfig):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        None,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        save_freq=cfg.TRAIN.save_freq,
-        eval_during_train=cfg.TRAIN.eval_during_train,
-        eval_freq=cfg.TRAIN.eval_freq,
+        optimizer=optimizer,
         validator=validator,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
+        cfg=cfg,
     )
     # train model
     solver.train()
@@ -152,11 +133,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info")
-
     # set airfoil model
     model = ppsci.arch.AMGNet(**cfg.MODEL)
@@ -170,11 +146,6 @@ def evaluate(cfg: DictConfig):
             "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH,
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     rmse_validator = ppsci.validate.SupervisedValidator(
         eval_dataloader_cfg,
@@ -187,12 +158,8 @@ def evaluate(cfg: DictConfig):
 
     solver = ppsci.solver.Solver(
         model,
-        output_dir=cfg.output_dir,
-        log_freq=cfg.log_freq,
-        seed=cfg.seed,
         validator=validator,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
+        cfg=cfg,
     )
     # evaluate model
     solver.eval()
diff --git a/examples/amgnet/amgnet_cylinder.py b/examples/amgnet/amgnet_cylinder.py
index 4203f6052..15187c404 100644
--- a/examples/amgnet/amgnet_cylinder.py
+++ b/examples/amgnet/amgnet_cylinder.py
@@ -14,7 +14,6 @@
 
 from __future__ import annotations
 
-from os import path as osp
 from typing import TYPE_CHECKING
 from typing import Dict
 from typing import List
@@ -53,11 +52,6 @@ def eval_rmse_func(
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info")
-
     # set cylinder model
     model = ppsci.arch.AMGNet(**cfg.MODEL)
@@ -76,7 +70,6 @@ def train(cfg: DictConfig):
             "drop_last": False,
             "shuffle": True,
         },
-        "num_workers": 1,
     }
 
     # set constraint
@@ -102,11 +95,6 @@ def train(cfg: DictConfig):
             "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH,
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     rmse_validator = ppsci.validate.SupervisedValidator(
         eval_dataloader_cfg,
@@ -121,16 +109,9 @@ def train(cfg: DictConfig):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        None,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        save_freq=cfg.TRAIN.save_freq,
-        eval_during_train=cfg.TRAIN.eval_during_train,
-        eval_freq=cfg.TRAIN.eval_freq,
+        optimizer=optimizer,
         validator=validator,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
+        cfg=cfg,
     )
     # train model
     solver.train()
@@ -152,11 +133,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info")
-
     # set airfoil model
     model = ppsci.arch.AMGNet(**cfg.MODEL)
@@ -170,11 +146,6 @@ def evaluate(cfg: DictConfig):
             "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH,
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     rmse_validator = ppsci.validate.SupervisedValidator(
         eval_dataloader_cfg,
@@ -187,12 +158,8 @@ def evaluate(cfg: DictConfig):
 
     solver = ppsci.solver.Solver(
         model,
-        output_dir=cfg.output_dir,
-        log_freq=cfg.log_freq,
-        seed=cfg.seed,
         validator=validator,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
+        cfg=cfg,
     )
     # evaluate model
     solver.eval()
diff --git a/examples/amgnet/conf/amgnet_airfoil.yaml b/examples/amgnet/conf/amgnet_airfoil.yaml
index c2ccb3a58..c49f7cc63 100644
--- a/examples/amgnet/conf/amgnet_airfoil.yaml
+++ b/examples/amgnet/conf/amgnet_airfoil.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,15 +15,9 @@ hydra:
   job:
     name: ${mode} # name of logfile
    chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/amgnet/conf/amgnet_cylinder.yaml b/examples/amgnet/conf/amgnet_cylinder.yaml
index 4ced24db2..cf2d5f1cb 100644
--- a/examples/amgnet/conf/amgnet_cylinder.yaml
+++ b/examples/amgnet/conf/amgnet_cylinder.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,15 +15,9 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/aneurysm/aneurysm.py b/examples/aneurysm/aneurysm.py
index c67ba7dd4..0a87d4728 100644
--- a/examples/aneurysm/aneurysm.py
+++ b/examples/aneurysm/aneurysm.py
@@ -52,7 +52,6 @@ def train(cfg: DictConfig):
             "drop_last": True,
             "shuffle": True,
         },
-        "num_workers": 1,
     }
 
     # set constraint
@@ -186,7 +185,6 @@ def inlet_w_ref_func(_in):
             "input": input_dict,
             "label": label_dict,
         },
-        "sampler": {"name": "BatchSampler"},
         "num_workers": 1,
     }
     sup_validator = ppsci.validate.SupervisedValidator(
@@ -222,23 +220,11 @@ def inlet_w_ref_func(_in):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        lr_scheduler,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        save_freq=cfg.TRAIN.save_freq,
-        log_freq=cfg.log_freq,
-        eval_during_train=True,
-        eval_freq=cfg.TRAIN.eval_freq,
-        seed=cfg.seed,
+        optimizer=optimizer,
         equation=equation,
-        geom=geom,
         validator=validator,
         visualizer=visualizer,
-        pretrained_model_path=cfg.TRAIN.pretrained_model_path,
-        checkpoint_path=cfg.TRAIN.checkpoint_path,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
+        cfg=cfg,
     )
     # train model
     solver.train()
@@ -284,7 +270,6 @@ def evaluate(cfg: DictConfig):
             "input": input_dict,
             "label": label_dict,
         },
-        "sampler": {"name": "BatchSampler"},
         "num_workers": 1,
     }
     sup_validator = ppsci.validate.SupervisedValidator(
@@ -319,13 +304,9 @@ def evaluate(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        output_dir=cfg.output_dir,
-        log_freq=cfg.log_freq,
-        seed=cfg.seed,
         validator=validator,
         visualizer=visualizer,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
+        cfg=cfg,
     )
     # evaluate
     solver.eval()
@@ -340,7 +321,7 @@ def export(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        pretrained_model_path=cfg.INFER.pretrained_model_path,
+        cfg=cfg,
     )
     # export model
     from paddle.static import InputSpec
diff --git a/examples/aneurysm/aneurysm_flow.py b/examples/aneurysm/aneurysm_flow.py
index 8c788c807..359552142 100644
--- a/examples/aneurysm/aneurysm_flow.py
+++ b/examples/aneurysm/aneurysm_flow.py
@@ -177,7 +177,6 @@ def output_transform_p(self, in_, out):
         geom=geom["interior"],
         dataloader_cfg={
             "dataset": "NamedArrayDataset",
-            "num_workers": 1,
             "batch_size": cfg.TRAIN.batch_size,
             "iters_per_epoch": int(x.shape[0] / cfg.TRAIN.batch_size),
             "sampler": {
@@ -276,11 +275,7 @@ def output_transform_p(self, in_, out):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        output_dir=cfg.output_dir,
-        log_freq=cfg.log_freq,
-        seed=cfg.seed,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
+        cfg=cfg,
     )
 
     def model_predict(
diff --git a/examples/aneurysm/conf/aneurysm.yaml b/examples/aneurysm/conf/aneurysm.yaml
index d7d353a25..4181bbdce 100644
--- a/examples/aneurysm/conf/aneurysm.yaml
+++ b/examples/aneurysm/conf/aneurysm.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,16 +15,6 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - INFER.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
   callbacks:
     init_callback:
       _target_: ppsci.utils.callbacks.InitCallback
diff --git a/examples/aneurysm/conf/aneurysm_flow.yaml b/examples/aneurysm/conf/aneurysm_flow.yaml
index 798a0fb91..785bd0285 100644
--- a/examples/aneurysm/conf/aneurysm_flow.yaml
+++ b/examples/aneurysm/conf/aneurysm_flow.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,15 +15,6 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
   callbacks:
     init_callback:
       _target_: ppsci.utils.callbacks.InitCallback
diff --git a/examples/biharmonic2d/biharmonic2d.py b/examples/biharmonic2d/biharmonic2d.py
index ec599f6ce..54cb68a56 100644
--- a/examples/biharmonic2d/biharmonic2d.py
+++ b/examples/biharmonic2d/biharmonic2d.py
@@ -69,11 +69,6 @@ def plotting(figname, output_dir, data, griddata_points, griddata_xi, boundary):
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # set models
     disp_net = ppsci.arch.MLP(**cfg.MODEL)
@@ -229,16 +224,8 @@ def train(cfg: DictConfig):
     solver_adam = ppsci.solver.Solver(
         disp_net,
         constraint,
-        cfg.output_dir,
         optimizer_adam,
-        None,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        save_freq=cfg.TRAIN.save_freq,
-        log_freq=cfg.log_freq,
-        seed=cfg.seed,
         equation=equation,
-        geom=geom,
         checkpoint_path=cfg.TRAIN.checkpoint_path,
         pretrained_model_path=cfg.TRAIN.pretrained_model_path,
     )
@@ -250,35 +237,24 @@ def train(cfg: DictConfig):
     solver_lbfgs = ppsci.solver.Solver(
         disp_net,
         constraint,
-        cfg.output_dir,
         optimizer_lbfgs,
-        None,
-        1,
-        1,
-        save_freq=cfg.TRAIN.save_freq,
-        log_freq=cfg.log_freq,
-        seed=cfg.seed,
+        epochs=1,
+        iters_per_epoch=1,
         equation=equation,
-        geom=geom,
-        checkpoint_path=cfg.TRAIN.checkpoint_path,
-        pretrained_model_path=cfg.TRAIN.pretrained_model_path,
+        cfg=cfg,
     )
     # evaluate after finished training
     solver_lbfgs.train()
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # set models
     disp_net = ppsci.arch.MLP(**cfg.MODEL)
 
     # load pretrained model
     solver = ppsci.solver.Solver(
-        model=disp_net, pretrained_model_path=cfg.EVAL.pretrained_model_path
+        model=disp_net,
+        cfg=cfg,
     )
 
     # generate samples
@@ -359,7 +335,8 @@ def export(cfg: DictConfig):
 
     # load pretrained model
     solver = ppsci.solver.Solver(
-        model=disp_net, pretrained_model_path=cfg.INFER.pretrained_model_path
+        model=disp_net,
+        cfg=cfg,
     )
 
     class Wrapped_Model(nn.Layer):
diff --git a/examples/biharmonic2d/conf/biharmonic2d.yaml b/examples/biharmonic2d/conf/biharmonic2d.yaml
index 9c500f32a..8b4f2dab5 100644
--- a/examples/biharmonic2d/conf/biharmonic2d.yaml
+++ b/examples/biharmonic2d/conf/biharmonic2d.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,17 +15,9 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - INFER.pretrained_model_path
-          - INFER.export_path
-          - mode
-          - output_dir
-          - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/bracket/bracket.py b/examples/bracket/bracket.py
index 5d85b535a..4528046f7 100644
--- a/examples/bracket/bracket.py
+++ b/examples/bracket/bracket.py
@@ -59,7 +59,6 @@ def train(cfg: DictConfig):
             "drop_last": True,
             "shuffle": True,
         },
-        "num_workers": 1,
     }
 
     # set constraint
@@ -277,11 +276,6 @@ def train(cfg: DictConfig):
             "input": input_dict,
             "label": label_dict,
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     sup_validator = ppsci.validate.SupervisedValidator(
         {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size.sup_validator},
@@ -325,22 +319,11 @@ def train(cfg: DictConfig):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        lr_scheduler,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        save_freq=cfg.TRAIN.save_freq,
-        log_freq=cfg.log_freq,
-        eval_during_train=cfg.TRAIN.eval_during_train,
-        eval_freq=cfg.TRAIN.eval_freq,
-        seed=cfg.seed,
+        optimizer=optimizer,
         equation=equation,
-        geom=geom,
         validator=validator,
         visualizer=visualizer,
-        checkpoint_path=cfg.TRAIN.checkpoint_path,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
+        cfg=cfg,
     )
     # train model
     solver.train()
@@ -453,11 +436,6 @@ def evaluate(cfg: DictConfig):
             "input": input_dict,
             "label": label_dict,
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     sup_validator = ppsci.validate.SupervisedValidator(
         {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size.sup_validator},
@@ -500,13 +478,9 @@ def evaluate(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        output_dir=cfg.output_dir,
-        log_freq=cfg.log_freq,
-        seed=cfg.seed,
         validator=validator,
         visualizer=visualizer,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
+        cfg=cfg,
     )
     # evaluate
     solver.eval()
diff --git a/examples/bracket/conf/bracket.yaml b/examples/bracket/conf/bracket.yaml
index 5e4682b4f..24ce3df3d 100644
--- a/examples/bracket/conf/bracket.yaml
+++ b/examples/bracket/conf/bracket.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,15 +15,6 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
   callbacks:
     init_callback:
       _target_: ppsci.utils.callbacks.InitCallback
diff --git a/examples/bubble/bubble.py b/examples/bubble/bubble.py
index a6adbd0d9..11fe421a1 100644
--- a/examples/bubble/bubble.py
+++ b/examples/bubble/bubble.py
@@ -32,11 +32,6 @@
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # load Data
     data = scipy.io.loadmat(cfg.DATA_PATH)
     # normalize data
@@ -171,11 +166,6 @@ def transform_out(in_, out):
                 "label": test_label,
             },
             "batch_size": cfg.TRAIN.batch_size.mse_validator,
-            "sampler": {
-                "name": "BatchSampler",
-                "drop_last": False,
-                "shuffle": False,
-            },
         },
         ppsci.loss.MSELoss("mean"),
         metric={"MSE": ppsci.metric.MSE()},
@@ -189,15 +179,9 @@ def transform_out(in_, out):
     solver = ppsci.solver.Solver(
         model_list,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        None,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        eval_during_train=cfg.TRAIN.eval_during_train,
-        eval_freq=cfg.TRAIN.eval_freq,
-        geom=geom,
+        optimizer=optimizer,
         validator=validator,
+        cfg=cfg,
     )
     # train model
     solver.train()
@@ -249,11 +233,6 @@ def transform_out(in_, out):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # load Data
     data = scipy.io.loadmat(cfg.DATA_PATH)
     # normalize data
@@ -343,11 +322,6 @@ def transform_out(in_, out):
                 "label": test_label,
             },
             "batch_size": cfg.TRAIN.batch_size.mse_validator,
-            "sampler": {
-                "name": "BatchSampler",
-                "drop_last": False,
-                "shuffle": False,
-            },
         },
         ppsci.loss.MSELoss("mean"),
         metric={"MSE": ppsci.metric.MSE()},
@@ -360,10 +334,8 @@ def transform_out(in_, out):
     # directly evaluate pretrained model(optional)
     solver = ppsci.solver.Solver(
         model_list,
-        output_dir=cfg.output_dir,
-        geom=geom,
         validator=validator,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
+        cfg=cfg,
     )
     solver.eval()
diff --git a/examples/bubble/conf/bubble.yaml b/examples/bubble/conf/bubble.yaml
index 63c74d7dd..bc1ef7c10 100644
--- a/examples/bubble/conf/bubble.yaml
+++ b/examples/bubble/conf/bubble.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,15 +15,9 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/cfdgcn/cfdgcn.py b/examples/cfdgcn/cfdgcn.py
index d06d9c0c9..2b73a3d07 100644
--- a/examples/cfdgcn/cfdgcn.py
+++ b/examples/cfdgcn/cfdgcn.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
 from typing import Dict
 from typing import List
 
@@ -25,7 +24,6 @@
 from paddle.nn import functional as F
 
 import ppsci
-from ppsci.utils import logger
 
 
 def train_mse_func(
@@ -49,11 +47,6 @@ def eval_rmse_func(
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", os.path.join(cfg.output_dir, "train.log"), "info")
-
     # set dataloader config
     train_dataloader_cfg = {
         "dataset": {
@@ -107,11 +100,6 @@ def train(cfg: DictConfig):
             "transpose_edges": True,
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     rmse_validator = ppsci.validate.SupervisedValidator(
         eval_dataloader_cfg,
@@ -126,17 +114,9 @@ def train(cfg: DictConfig):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        None,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        save_freq=cfg.TRAIN.save_freq,
-        eval_during_train=cfg.TRAIN.eval_during_train,
-        eval_freq=cfg.TRAIN.eval_freq,
+        optimizer=optimizer,
         validator=validator,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
-        checkpoint_path=cfg.TRAIN.checkpoint_path,
+        cfg=cfg,
     )
 
     # train model
@@ -174,7 +154,6 @@ def evaluate(cfg: DictConfig):
             "drop_last": False,
             "shuffle": True,
         },
-        "num_workers": 1,
     }
 
     # set constraint
@@ -207,11 +186,6 @@ def evaluate(cfg: DictConfig):
             "transpose_edges": True,
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     rmse_validator = ppsci.validate.SupervisedValidator(
         eval_dataloader_cfg,
@@ -224,12 +198,8 @@ def evaluate(cfg: DictConfig):
 
     solver = ppsci.solver.Solver(
         model,
-        output_dir=cfg.output_dir,
-        log_freq=cfg.log_freq,
-        seed=cfg.seed,
         validator=validator,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
+        cfg=cfg,
     )
 
     # evaluate model
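Since mode, seed, output_dir and the model paths are all read from the unified config, each refactored example is driven the same way from the command line; the overrides below are illustrative (the checkpoint path is a placeholder, not taken from this patch):

    python bubble.py mode=train
    python bubble.py mode=eval EVAL.pretrained_model_path=/path/to/model.pdparams

Keys listed in the shared exclude_keys_default (checkpoint/pretrained paths, mode, output_dir, log_freq) are omitted from Hydra's override_dirname, so pointing an evaluation run at a different checkpoint does not change the auto-generated output directory name.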
diff --git a/examples/cfdgcn/conf/cfdgcn.yaml b/examples/cfdgcn/conf/cfdgcn.yaml
index ad55a9e14..912497cda 100644
--- a/examples/cfdgcn/conf/cfdgcn.yaml
+++ b/examples/cfdgcn/conf/cfdgcn.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,15 +15,9 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/chip_heat/chip_heat.py b/examples/chip_heat/chip_heat.py
index 775231733..59a6d6807 100644
--- a/examples/chip_heat/chip_heat.py
+++ b/examples/chip_heat/chip_heat.py
@@ -498,14 +498,9 @@ def train(cfg: DictConfig):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        None,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        eval_during_train=cfg.TRAIN.eval_during_train,
-        eval_freq=cfg.TRAIN.eval_freq,
+        optimizer=optimizer,
         validator=validator,
+        cfg=cfg,
     )
     # train model
     solver.train()
@@ -708,9 +703,8 @@ def evaluate(cfg: DictConfig):
     # directly evaluate pretrained model(optional)
     solver = ppsci.solver.Solver(
         model,
-        output_dir=cfg.output_dir,
         validator=validator,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
+        cfg=cfg,
     )
     solver.eval()
     # visualize prediction result
diff --git a/examples/chip_heat/conf/chip_heat.yaml b/examples/chip_heat/conf/chip_heat.yaml
index c57e1036f..edd1dd0eb 100644
--- a/examples/chip_heat/conf/chip_heat.yaml
+++ b/examples/chip_heat/conf/chip_heat.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,15 +15,6 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
   callbacks:
     init_callback:
       _target_: ppsci.utils.callbacks.InitCallback
diff --git a/examples/control_arm/conf/forward_analysis.yaml b/examples/control_arm/conf/forward_analysis.yaml
index 36af28f44..bf1ee7b20 100644
--- a/examples/control_arm/conf/forward_analysis.yaml
+++ b/examples/control_arm/conf/forward_analysis.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,17 +15,9 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - INFER.pretrained_model_path
-          - INFER.export_path
-          - mode
-          - output_dir
-          - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/control_arm/conf/inverse_parameter.yaml b/examples/control_arm/conf/inverse_parameter.yaml
index 374492912..7127d1454 100644
--- a/examples/control_arm/conf/inverse_parameter.yaml
+++ b/examples/control_arm/conf/inverse_parameter.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,17 +15,9 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - INFER.pretrained_model_path
-          - INFER.export_path
-          - mode
-          - output_dir
-          - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/control_arm/forward_analysis.py b/examples/control_arm/forward_analysis.py
index 31f138b6f..8e6f3ab43 100644
--- a/examples/control_arm/forward_analysis.py
+++ b/examples/control_arm/forward_analysis.py
@@ -1,3 +1,17 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 from os import path as osp
 
 import hydra
@@ -6,14 +20,9 @@
 from paddle import distributed as dist
 
 import ppsci
-from ppsci.utils import logger
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
 
     # set parallel
     enable_parallel = dist.get_world_size() > 1
@@ -55,7 +64,6 @@ def train(cfg: DictConfig):
             "drop_last": True,
             "shuffle": True,
         },
-        "num_workers": 1,
     }
 
     # set constraint
@@ -187,21 +195,10 @@ def train(cfg: DictConfig):
     solver = ppsci.solver.Solver(
         model_list,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        lr_scheduler,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        seed=cfg.seed,
+        optimizer=optimizer,
         equation=equation,
-        geom=geom,
-        save_freq=cfg.TRAIN.save_freq,
-        log_freq=cfg.log_freq,
-        eval_freq=cfg.TRAIN.eval_freq,
-        eval_during_train=cfg.TRAIN.eval_during_train,
-        eval_with_no_grad=cfg.TRAIN.eval_with_no_grad,
        visualizer=visualizer,
-        checkpoint_path=cfg.TRAIN.checkpoint_path,
+        cfg=cfg,
     )
 
     # train model
@@ -212,11 +209,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # set model
     disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net)
     stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net)
@@ -268,13 +260,8 @@ def evaluate(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model_list,
-        output_dir=cfg.output_dir,
-        seed=cfg.seed,
-        geom=geom,
-        log_freq=cfg.log_freq,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
         visualizer=visualizer,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
+        cfg=cfg,
     )
 
     # visualize prediction after finished training
diff --git a/examples/control_arm/inverse_parameter.py b/examples/control_arm/inverse_parameter.py
index 77f17c416..e608e6526 100644
--- a/examples/control_arm/inverse_parameter.py
+++ b/examples/control_arm/inverse_parameter.py
@@ -1,18 +1,26 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 from os import path as osp
 
 import hydra
 from omegaconf import DictConfig
 
 import ppsci
-from ppsci.utils import logger
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # set model
     disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net)
     stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net)
@@ -67,7 +75,6 @@ def train(cfg: DictConfig):
             "drop_last": True,
             "shuffle": True,
         },
-        "num_workers": 1,
         "batch_size": cfg.TRAIN.batch_size.arm_interior,
     },
     ppsci.loss.MSELoss("sum"),
@@ -98,11 +105,6 @@ def train(cfg: DictConfig):
         geom["geo"],
         {
             "dataset": "NamedArrayDataset",
-            "sampler": {
-                "name": "BatchSampler",
-                "drop_last": False,
-                "shuffle": False,
-            },
             "total_size": cfg.EVAL.total_size.validator,
             "batch_size": cfg.EVAL.batch_size.validator,
         },
@@ -143,22 +145,11 @@ def train(cfg: DictConfig):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        lr_scheduler,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        seed=cfg.seed,
+        optimizer=optimizer,
         equation=equation,
-        geom=geom,
-        save_freq=cfg.TRAIN.save_freq,
-        log_freq=cfg.log_freq,
-        eval_freq=cfg.TRAIN.eval_freq,
-        eval_during_train=cfg.TRAIN.eval_during_train,
-        eval_with_no_grad=cfg.TRAIN.eval_with_no_grad,
         validator=validator,
         visualizer=visualizer,
-        pretrained_model_path=cfg.TRAIN.pretrained_model_path,
+        cfg=cfg,
     )
 
     # train model
@@ -169,11 +160,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # set model
     disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net)
     stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net)
@@ -207,11 +193,6 @@ def evaluate(cfg: DictConfig):
         geom["geo"],
         {
             "dataset": "NamedArrayDataset",
-            "sampler": {
-                "name": "BatchSampler",
-                "drop_last": False,
-                "shuffle": False,
-            },
             "total_size": cfg.EVAL.total_size.validator,
             "batch_size": cfg.EVAL.batch_size.validator,
         },
@@ -251,13 +232,9 @@ def evaluate(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        output_dir=cfg.output_dir,
-        seed=cfg.seed,
-        log_freq=cfg.log_freq,
-        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
         validator=validator,
         visualizer=visualizer,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
+        cfg=cfg,
     )
     # evaluate after finished training
     solver.eval()
diff --git a/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml b/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml
index fdc2062e7..d4c672a31 100644
--- a/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml
+++ b/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,15 +15,9 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py b/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py
index 379646090..43bdca77f 100644
--- a/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py
+++ b/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from os import path as osp
 
 import hydra
 import numpy as np
@@ -24,12 +23,6 @@
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info")
-
     # set model
     model = ppsci.arch.MLP(**cfg.MODEL)
@@ -180,7 +173,6 @@ def train(cfg: DictConfig):
             "dataset": "NamedArrayDataset",
             "total_size": NPOINT_EVAL,
             "batch_size": cfg.EVAL.batch_size,
-            "sampler": {"name": "BatchSampler"},
         },
         ppsci.loss.MSELoss("mean"),
         metric={"MSE": ppsci.metric.MSE()},
@@ -207,18 +199,11 @@ def train(cfg: DictConfig):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        None,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        eval_during_train=cfg.TRAIN.eval_during_train,
-        eval_freq=cfg.TRAIN.eval_freq,
+        optimizer=optimizer,
         equation=equation,
-        geom=geom,
         validator=validator,
         visualizer=visualizer,
-        checkpoint_path=cfg.TRAIN.checkpoint_path,
+        cfg=cfg,
     )
     # train model
     solver.train()
@@ -229,12 +214,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info")
-
     # set model
     model = ppsci.arch.MLP(**cfg.MODEL)
@@ -273,7 +252,6 @@ def evaluate(cfg: DictConfig):
             "dataset": "NamedArrayDataset",
             "total_size": NPOINT_EVAL,
             "batch_size": cfg.EVAL.batch_size,
-            "sampler": {"name": "BatchSampler"},
         },
         ppsci.loss.MSELoss("mean"),
         metric={"MSE": ppsci.metric.MSE()},
@@ -299,11 +277,9 @@ def evaluate(cfg: DictConfig):
     # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        geom=geom,
-        output_dir=cfg.output_dir,
         validator=validator,
         visualizer=visualizer,
-        pretrained_model_path=cfg.EVAL.pretrained_model_path,
+        cfg=cfg,
     )
     # evaluate
     solver.eval()
diff --git a/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml b/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml
index 38df8b078..363bac8a2 100644
--- a/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml
+++ b/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,15 +15,9 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml b/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml
index 20003f2a7..02038ad1a 100644
--- a/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml
+++ b/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -15,6 +25,9 @@ hydra:
         - output_dir
        - log_freq
         - EMBEDDING_MODEL_PATH
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/cylinder/2d_unsteady/transformer_physx/train_enn.py b/examples/cylinder/2d_unsteady/transformer_physx/train_enn.py
index 021f6d0d4..1f213f30d 100644
--- a/examples/cylinder/2d_unsteady/transformer_physx/train_enn.py
+++ b/examples/cylinder/2d_unsteady/transformer_physx/train_enn.py
@@ -18,7 +18,6 @@
 # This file is for step1: training a embedding model.
 # This file is based on PaddleScience/ppsci API.
 
-from os import path as osp
 
 import hydra
 import numpy as np
@@ -26,7 +25,6 @@
 from omegaconf import DictConfig
 
 import ppsci
-from ppsci.utils import logger
 
 
 def get_mean_std(data: np.ndarray, visc: np.ndarray):
@@ -50,11 +48,6 @@ def get_mean_std(data: np.ndarray, visc: np.ndarray):
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     weights = (10.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 10.0 * cfg.TRAIN_BLOCK_SIZE)
     regularization_key = "k_matrix"
     # manually build constraint(s)
@@ -133,11 +126,6 @@ def train(cfg: DictConfig):
                 key: value for key, value in zip(cfg.MODEL.output_keys, weights)
             },
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
         "batch_size": cfg.EVAL.batch_size,
         "num_workers": 4,
     }
@@ -170,11 +158,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     weights = (10.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 10.0 * cfg.TRAIN_BLOCK_SIZE)
     regularization_key = "k_matrix"
     # manually build constraint(s)
@@ -238,11 +221,6 @@ def evaluate(cfg: DictConfig):
                 key: value for key, value in zip(cfg.MODEL.output_keys, weights)
             },
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
         "batch_size": cfg.EVAL.batch_size,
         "num_workers": 4,
     }
diff --git a/examples/cylinder/2d_unsteady/transformer_physx/train_transformer.py b/examples/cylinder/2d_unsteady/transformer_physx/train_transformer.py
index 34eb6c288..e3d8fea0a 100644
--- a/examples/cylinder/2d_unsteady/transformer_physx/train_transformer.py
+++ b/examples/cylinder/2d_unsteady/transformer_physx/train_transformer.py
@@ -18,7 +18,6 @@
 # This file is for step2: training a transformer model, based on frozen pretrained embedding model.
 # This file is based on PaddleScience/ppsci API.
 
-from os import path as osp
 from typing import Dict
 
 import hydra
@@ -28,7 +27,6 @@
 
 import ppsci
 from ppsci.arch import base
-from ppsci.utils import logger
 from ppsci.utils import save_load
 
 
@@ -56,11 +54,6 @@ def __call__(self, x: Dict[str, paddle.Tensor]) -> Dict[str, paddle.Tensor]:
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH)
     output_transform = OutputTransform(embedding_model)
@@ -117,11 +110,6 @@ def train(cfg: DictConfig):
             "stride": 1024,
             "embedding_model": embedding_model,
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
         "batch_size": cfg.EVAL.batch_size,
         "num_workers": 4,
     }
@@ -185,9 +173,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # directly evaluate pretrained model(optional)
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH)
     output_transform = OutputTransform(embedding_model)
@@ -205,11 +190,6 @@ def evaluate(cfg: DictConfig):
             "stride": 1024,
             "embedding_model": embedding_model,
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
         "batch_size": cfg.EVAL.batch_size,
         "num_workers": 4,
     }
diff --git a/examples/darcy/conf/darcy2d.yaml b/examples/darcy/conf/darcy2d.yaml
index ec114d1c3..352c81a16 100644
--- a/examples/darcy/conf/darcy2d.yaml
+++ b/examples/darcy/conf/darcy2d.yaml
@@ -1,3 +1,13 @@
+defaults:
+  - ppsci_default
+  - TRAIN: train_default
+  - TRAIN/ema: ema_default
+  - TRAIN/swa: swa_default
+  - EVAL: eval_default
+  - INFER: infer_default
+  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
+  - _self_
+
 hydra:
   run:
     # dynamic output directory according to running time and override name
@@ -5,15 +15,9 @@ hydra:
   job:
     name: ${mode} # name of logfile
     chdir: false # keep current working directory unchanged
-    config:
-      override_dirname:
-        exclude_keys:
-          - TRAIN.checkpoint_path
-          - TRAIN.pretrained_model_path
-          - EVAL.pretrained_model_path
-          - mode
-          - output_dir
-          - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/darcy/darcy2d.py b/examples/darcy/darcy2d.py
index 12e32af17..a9d4352f8 100644
--- a/examples/darcy/darcy2d.py
+++ b/examples/darcy/darcy2d.py
@@ -25,11 +25,6 @@
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # set model
     model = ppsci.arch.MLP(**cfg.MODEL)
@@ -94,7 +89,6 @@ def poisson_ref_compute_func(_in):
             "dataset": "NamedArrayDataset",
             "total_size": cfg.NPOINT_PDE,
             "batch_size": cfg.EVAL.batch_size.residual_validator,
-            "sampler": {"name": "BatchSampler"},
         },
         ppsci.loss.MSELoss("sum"),
         evenly=True,
@@ -149,17 +143,11 @@ def poisson_ref_compute_func(_in):
     solver = ppsci.solver.Solver(
         model,
         constraint,
-        cfg.output_dir,
-        optimizer,
-        lr_scheduler,
-        cfg.TRAIN.epochs,
-        cfg.TRAIN.iters_per_epoch,
-        eval_during_train=cfg.TRAIN.eval_during_train,
-        eval_freq=cfg.TRAIN.eval_freq,
+        optimizer=optimizer,
         equation=equation,
-        geom=geom,
         validator=validator,
         visualizer=visualizer,
+        cfg=cfg,
     )
     # train model
     solver.train()
@@ -180,15
+168,14 @@ def poisson_ref_compute_func(_in): constraint, OUTPUT_DIR, optimizer_lbfgs, - None, - EPOCHS, - cfg.TRAIN.lbfgs.iters_per_epoch, + epochs=EPOCHS, + iters_per_epoch=cfg.TRAIN.lbfgs.iters_per_epoch, eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, eval_freq=cfg.TRAIN.lbfgs.eval_freq, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, + cfg=cfg, ) # train model solver.train() @@ -199,11 +186,6 @@ def poisson_ref_compute_func(_in): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -231,7 +213,6 @@ def poisson_ref_compute_func(_in): "dataset": "NamedArrayDataset", "total_size": cfg.NPOINT_PDE, "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, }, ppsci.loss.MSELoss("sum"), evenly=True, @@ -284,12 +265,10 @@ def poisson_ref_compute_func(_in): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() # visualize prediction diff --git a/examples/deepcfd/conf/deepcfd.yaml b/examples/deepcfd/conf/deepcfd.yaml index f0780b9ec..e13b3dcc0 100644 --- a/examples/deepcfd/conf/deepcfd.yaml +++ b/examples/deepcfd/conf/deepcfd.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/deepcfd/deepcfd.py b/examples/deepcfd/deepcfd.py index 3dddcb7ff..5a0cb7b0c 100644 --- a/examples/deepcfd/deepcfd.py +++ b/examples/deepcfd/deepcfd.py @@ -24,7 +24,6 @@ from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def split_tensors( @@ -199,11 +198,6 @@ def predict_and_save_plot( def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", os.path.join(cfg.output_dir, "train.log"), "info") - # initialize datasets with open(cfg.DATAX_PATH, "rb") as file: x = pickle.load(file) @@ -279,11 +273,6 @@ def loss_expr( "label": {"output": test_y}, }, "batch_size": cfg.EVAL.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } def metric_expr( @@ -317,15 +306,9 @@ def metric_expr( solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, + optimizer=optimizer, validator=validator, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model @@ -342,11 +325,6 @@ def 
metric_expr( def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", os.path.join(cfg.output_dir, "eval.log"), "info") - # initialize datasets with open(cfg.DATAX_PATH, "rb") as file: x = pickle.load(file) @@ -396,11 +374,6 @@ def loss_expr( "label": {"output": test_y}, }, "batch_size": cfg.EVAL.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } def metric_expr( @@ -433,11 +406,8 @@ def metric_expr( # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - seed=cfg.seed, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate diff --git a/examples/deephpms/burgers.py b/examples/deephpms/burgers.py index 296241775..b07019508 100644 --- a/examples/deephpms/burgers.py +++ b/examples/deephpms/burgers.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from os import path as osp import hydra import numpy as np @@ -57,10 +56,6 @@ def boundary_loss_func(output_dict, *args): def train(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # initialize burgers boundaries t_lb = paddle.to_tensor(cfg.T_LB) t_ub = paddle.to_tensor(cfg.T_UB) @@ -158,13 +153,9 @@ def transform_f_sol(_in): solver = ppsci.solver.Solver( model_list, constraint_idn, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_idn, validator=validator_idn, + cfg=cfg, ) # train model @@ -222,13 +213,9 @@ def transform_f_sol(_in): solver = ppsci.solver.Solver( model_list, constraint_pde, - cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_pde, validator=validator_pde, + cfg=cfg, ) # train model @@ -323,13 +310,9 @@ def transform_f_sol(_in): solver = ppsci.solver.Solver( model_list, constraint_sol, - cfg.output_dir, - optimizer_sol, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_sol, validator=validator_sol, + cfg=cfg, ) # train model @@ -339,10 +322,6 @@ def transform_f_sol(_in): def evaluate(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # initialize burgers boundaries t_lb = paddle.to_tensor(cfg.T_LB) t_ub = paddle.to_tensor(cfg.T_UB) diff --git a/examples/deephpms/conf/burgers.yaml b/examples/deephpms/conf/burgers.yaml index bcc148304..cbaaa5e8b 100644 --- a/examples/deephpms/conf/burgers.yaml +++ b/examples/deephpms/conf/burgers.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - 
TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/deephpms/conf/korteweg_de_vries.yaml b/examples/deephpms/conf/korteweg_de_vries.yaml index a73c3830d..750e0e29d 100644 --- a/examples/deephpms/conf/korteweg_de_vries.yaml +++ b/examples/deephpms/conf/korteweg_de_vries.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/deephpms/conf/kuramoto_sivashinsky.yaml b/examples/deephpms/conf/kuramoto_sivashinsky.yaml index 68e8e0b27..e0b3fc312 100644 --- a/examples/deephpms/conf/kuramoto_sivashinsky.yaml +++ b/examples/deephpms/conf/kuramoto_sivashinsky.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/deephpms/conf/navier_stokes.yaml b/examples/deephpms/conf/navier_stokes.yaml index 748b7f0d3..c8cf10519 100644 --- a/examples/deephpms/conf/navier_stokes.yaml +++ b/examples/deephpms/conf/navier_stokes.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/deephpms/conf/schrodinger.yaml b/examples/deephpms/conf/schrodinger.yaml index f68f84462..7cb270d7f 100644 --- a/examples/deephpms/conf/schrodinger.yaml +++ b/examples/deephpms/conf/schrodinger.yaml @@ -1,3 +1,13 @@ +defaults: + - 
ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/deephpms/korteweg_de_vries.py b/examples/deephpms/korteweg_de_vries.py index b491e0557..85835a75b 100644 --- a/examples/deephpms/korteweg_de_vries.py +++ b/examples/deephpms/korteweg_de_vries.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from os import path as osp import hydra import numpy as np @@ -63,10 +62,6 @@ def train(cfg: DictConfig): # open FLAG for higher order differential operator when order >= 4 paddle.framework.core.set_prim_eager_enabled(True) - ppsci.utils.misc.set_random_seed(42) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # initialize boundaries t_lb = paddle.to_tensor(cfg.T_LB) t_ub = paddle.to_tensor(cfg.T_UB) @@ -165,13 +160,9 @@ def transform_f_sol(_in): solver = ppsci.solver.Solver( model_list, constraint_idn, - cfg.output_dir, optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, validator=validator_idn, + cfg=cfg, ) # train model @@ -229,13 +220,9 @@ def transform_f_sol(_in): solver = ppsci.solver.Solver( model_list, constraint_pde, - cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_pde, validator=validator_pde, + cfg=cfg, ) # train model @@ -330,13 +317,9 @@ def transform_f_sol(_in): solver = ppsci.solver.Solver( model_list, constraint_sol, - cfg.output_dir, - optimizer_sol, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_sol, validator=validator_sol, + cfg=cfg, ) # train model @@ -349,10 +332,6 @@ def evaluate(cfg: DictConfig): # open FLAG for higher order differential operator when order >= 4 paddle.framework.core.set_prim_eager_enabled(True) - ppsci.utils.misc.set_random_seed(42) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # initialize boundaries t_lb = paddle.to_tensor(cfg.T_LB) t_ub = paddle.to_tensor(cfg.T_UB) diff --git a/examples/deephpms/kuramoto_sivashinsky.py b/examples/deephpms/kuramoto_sivashinsky.py index 2ea2660c0..5bd6e63f5 100644 --- a/examples/deephpms/kuramoto_sivashinsky.py +++ b/examples/deephpms/kuramoto_sivashinsky.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
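On the config side, every YAML touched by this patch gains the same header: a hydra `defaults` list pulling in the packaged default groups, with the per-file `hydra.job.config.override_dirname.exclude_keys` block replaced by the shared `exclude_keys_default` group. A sketch of the shared boilerplate follows, assuming the default groups ship with PaddleScience under the names used in these hunks; the `run.dir` value is illustrative since each example keeps its own:

```yaml
defaults:
  - ppsci_default
  - TRAIN: train_default
  - TRAIN/ema: ema_default
  - TRAIN/swa: swa_default
  - EVAL: eval_default
  - INFER: infer_default
  - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default
  - _self_

hydra:
  run:
    dir: outputs_example/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname}  # illustrative
  job:
    name: ${mode}  # name of logfile
    chdir: false  # keep current working directory unchanged
  callbacks:
    init_callback:
      _target_: ppsci.utils.callbacks.InitCallback
  sweep:
    dir: ${hydra.run.dir}  # output directory for multirun
```

Configs that need extra exclusions keep a local list on top of the default group, as transformer.yaml does above for `EMBEDDING_MODEL_PATH`.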
-from os import path as osp import hydra import numpy as np @@ -66,10 +65,6 @@ def train(cfg: DictConfig): # open FLAG for higher order differential operator when order >= 4 paddle.framework.core.set_prim_eager_enabled(True) - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # initialize boundaries t_lb = paddle.to_tensor(cfg.T_LB) t_ub = paddle.to_tensor(cfg.T_UB) @@ -168,13 +163,9 @@ def transform_f_idn(_in): solver = ppsci.solver.Solver( model_list, constraint_idn, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_idn, validator=validator_idn, + cfg=cfg, ) # train model @@ -232,13 +223,9 @@ def transform_f_idn(_in): solver = ppsci.solver.Solver( model_list, constraint_pde, - cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_pde, validator=validator_pde, + cfg=cfg, ) # train model @@ -330,13 +317,9 @@ def transform_f_idn(_in): solver = ppsci.solver.Solver( model_list, constraint_sol, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_idn, validator=validator_sol, + cfg=cfg, ) # train model @@ -349,10 +332,6 @@ def evaluate(cfg: DictConfig): # open FLAG for higher order differential operator when order >= 4 paddle.framework.core.set_prim_eager_enabled(True) - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # initialize boundaries t_lb = paddle.to_tensor(cfg.T_LB) t_ub = paddle.to_tensor(cfg.T_UB) diff --git a/examples/deephpms/navier_stokes.py b/examples/deephpms/navier_stokes.py index b7bbffa01..442a5596f 100644 --- a/examples/deephpms/navier_stokes.py +++ b/examples/deephpms/navier_stokes.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from os import path as osp import hydra import numpy as np @@ -43,10 +42,6 @@ def pde_l2_rel_func(output_dict, *args): def train(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # initialize boundaries # t, x, y lb = paddle.to_tensor(list(cfg.LB)) @@ -148,11 +143,6 @@ def transform_f(_in): }, }, "batch_size": cfg.TRAIN.batch_size.eval, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } sup_validator_idn = ppsci.validate.SupervisedValidator( @@ -168,13 +158,9 @@ def transform_f(_in): solver = ppsci.solver.Solver( model_list, constraint_idn, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_idn, validator=validator_idn, + cfg=cfg, ) # train model @@ -229,11 +215,6 @@ def transform_f(_in): }, }, "batch_size": cfg.TRAIN.batch_size.eval, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } sup_validator_pde = ppsci.validate.SupervisedValidator( @@ -252,13 +233,9 @@ def transform_f(_in): solver = ppsci.solver.Solver( model_list, constraint_pde, - cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_pde, validator=validator_pde, + cfg=cfg, ) # train model @@ -338,11 +315,6 @@ def transform_f(_in): }, }, "batch_size": cfg.TRAIN.batch_size.eval, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } sup_validator_sol = ppsci.validate.SupervisedValidator( @@ -358,13 +330,9 @@ def transform_f(_in): solver = ppsci.solver.Solver( model_list, constraint_sol, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_idn, validator=validator_sol, + cfg=cfg, ) # train model @@ -374,10 +342,6 @@ def transform_f(_in): def evaluate(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # initialize boundaries # t, x, y lb = paddle.to_tensor(list(cfg.LB)) diff --git a/examples/deephpms/schrodinger.py b/examples/deephpms/schrodinger.py index 4ab910b2b..4c51323d4 100644 --- a/examples/deephpms/schrodinger.py +++ b/examples/deephpms/schrodinger.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
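The DeepHPMS scripts (burgers, korteweg_de_vries, kuramoto_sivashinsky, navier_stokes, schrodinger) each train three sub-problems in sequence (identification, PDE, and solution), so the same Solver simplification repeats three times per file. Conceptually each stage now reduces to the sketch below; the names come straight from the surrounding hunks, but the loop is an illustrative compression, since the real scripts write the three calls out explicitly:

```python
# Illustrative compression of the three DeepHPMS training stages; the
# constraint_*/optimizer_*/validator_* objects are built earlier in each
# script, and some scripts reuse optimizer_idn for the solution stage.
for constraint, optimizer, validator in (
    (constraint_idn, optimizer_idn, validator_idn),
    (constraint_pde, optimizer_pde, validator_pde),
    (constraint_sol, optimizer_sol, validator_sol),
):
    solver = ppsci.solver.Solver(
        model_list,
        constraint,
        optimizer=optimizer,
        validator=validator,
        cfg=cfg,
    )
    solver.train()
```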
-from os import path as osp import hydra import numpy as np @@ -74,10 +73,6 @@ def sol_l2_rel_func(output_dict, label_dict): def train(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # initialize boundaries t_lb = paddle.to_tensor(cfg.T_LB) t_ub = paddle.to_tensor(np.pi / cfg.T_UB) @@ -184,11 +179,6 @@ def transform_fg(_in): }, }, "batch_size": cfg.TRAIN.batch_size.eval, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } sup_validator_idn = ppsci.validate.SupervisedValidator( @@ -204,13 +194,9 @@ def transform_fg(_in): solver = ppsci.solver.Solver( model_list, constraint_idn, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_idn, validator=validator_idn, + cfg=cfg, ) # train model @@ -263,11 +249,6 @@ def transform_fg(_in): }, }, "batch_size": cfg.TRAIN.batch_size.eval, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } sup_validator_pde = ppsci.validate.SupervisedValidator( @@ -288,13 +269,9 @@ def transform_fg(_in): solver = ppsci.solver.Solver( model_list, constraint_pde, - cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_pde, validator=validator_pde, + cfg=cfg, ) # train model @@ -389,11 +366,6 @@ def transform_fg(_in): }, }, "batch_size": cfg.TRAIN.batch_size.eval, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } sup_validator_sol = ppsci.validate.SupervisedValidator( @@ -409,13 +381,9 @@ def transform_fg(_in): solver = ppsci.solver.Solver( model_list, constraint_sol, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_idn, validator=validator_sol, + cfg=cfg, ) # train model @@ -425,10 +393,6 @@ def transform_fg(_in): def evaluate(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # initialize boundaries t_lb = paddle.to_tensor(cfg.T_LB) t_ub = paddle.to_tensor(np.pi / cfg.T_UB) diff --git a/examples/dgmr/conf/dgmr.yaml b/examples/dgmr/conf/dgmr.yaml index f031f94d6..00b4b224d 100644 --- a/examples/dgmr/conf/dgmr.yaml +++ b/examples/dgmr/conf/dgmr.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/dgmr/dgmr.py b/examples/dgmr/dgmr.py index 52bd41332..95ceb9b33 100644 --- a/examples/dgmr/dgmr.py +++ b/examples/dgmr/dgmr.py @@ -202,7 +202,7 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - 
pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.model.eval() diff --git a/examples/earthformer/conf/earthformer_enso_pretrain.yaml b/examples/earthformer/conf/earthformer_enso_pretrain.yaml index 8f0919a5d..541b96d52 100644 --- a/examples/earthformer/conf/earthformer_enso_pretrain.yaml +++ b/examples/earthformer/conf/earthformer_enso_pretrain.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/earthformer/conf/earthformer_sevir_pretrain.yaml b/examples/earthformer/conf/earthformer_sevir_pretrain.yaml index 7bff22e88..29d31e9e1 100644 --- a/examples/earthformer/conf/earthformer_sevir_pretrain.yaml +++ b/examples/earthformer/conf/earthformer_sevir_pretrain.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/earthformer/earthformer_enso_train.py b/examples/earthformer/earthformer_enso_train.py index 120654c70..ea0bd2c5d 100644 --- a/examples/earthformer/earthformer_enso_train.py +++ b/examples/earthformer/earthformer_enso_train.py @@ -131,11 +131,9 @@ def train(cfg: DictConfig): constraint, cfg.output_dir, optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=ITERS_PER_EPOCH, eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, validator=validator, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, @@ -185,7 +183,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, diff --git a/examples/epnn/conf/epnn.yaml b/examples/epnn/conf/epnn.yaml index 68773e8f4..56a78ee06 100644 --- a/examples/epnn/conf/epnn.yaml +++ b/examples/epnn/conf/epnn.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of 
logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/epnn/epnn.py b/examples/epnn/epnn.py index 0c161b185..139a817c5 100755 --- a/examples/epnn/epnn.py +++ b/examples/epnn/epnn.py @@ -16,23 +16,15 @@ Reference: https://github.com/meghbali/ANNElastoplasticity """ -from os import path as osp import functions import hydra from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - ( input_dict_train, label_dict_train, @@ -106,15 +98,9 @@ def _transform_in_stress(_in): solver = ppsci.solver.Solver( model_list_obj, constraint_pde, - cfg.output_dir, - optimizer_list, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_list, validator=validator_pde, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model @@ -123,11 +109,6 @@ def _transform_in_stress(_in): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - ( input_dict_train, _, @@ -184,10 +165,8 @@ def _transform_in_stress(_in): # initialize solver solver = ppsci.solver.Solver( model_list_obj, - output_dir=cfg.output_dir, validator=validator_pde, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate solver.eval() diff --git a/examples/euler_beam/conf/euler_beam.yaml b/examples/euler_beam/conf/euler_beam.yaml index b117460a4..6827f2251 100644 --- a/examples/euler_beam/conf/euler_beam.yaml +++ b/examples/euler_beam/conf/euler_beam.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,17 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - INFER.export_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/euler_beam/euler_beam.py b/examples/euler_beam/euler_beam.py index 48cf3ac63..0ea84d33f 100644 --- a/examples/euler_beam/euler_beam.py +++ b/examples/euler_beam/euler_beam.py @@ -108,21 +108,11 @@ def u_solution_func(out): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, + optimizer=optimizer, equation=equation, - 
geom=geom, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - to_static=cfg.to_static, + cfg=cfg, ) # train model solver.train() @@ -181,17 +171,10 @@ def u_solution_func(out): # initialize solver solver = ppsci.solver.Solver( model, - None, - cfg.output_dir, - None, - seed=cfg.seed, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - to_static=cfg.to_static, + cfg=cfg, ) # evaluate after finished training solver.eval() @@ -206,7 +189,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/fourcastnet/conf/fourcastnet_finetune.yaml b/examples/fourcastnet/conf/fourcastnet_finetune.yaml index 0854a1743..b3300adc5 100644 --- a/examples/fourcastnet/conf/fourcastnet_finetune.yaml +++ b/examples/fourcastnet/conf/fourcastnet_finetune.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/fourcastnet/conf/fourcastnet_precip.yaml b/examples/fourcastnet/conf/fourcastnet_precip.yaml index 92374e963..c8134f67a 100644 --- a/examples/fourcastnet/conf/fourcastnet_precip.yaml +++ b/examples/fourcastnet/conf/fourcastnet_precip.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/fourcastnet/conf/fourcastnet_pretrain.yaml b/examples/fourcastnet/conf/fourcastnet_pretrain.yaml index 3a4088cd2..b8dd24664 100644 --- a/examples/fourcastnet/conf/fourcastnet_pretrain.yaml +++ b/examples/fourcastnet/conf/fourcastnet_pretrain.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output 
directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/fourcastnet/train_finetune.py b/examples/fourcastnet/train_finetune.py index 3d6e2a781..9ea102e45 100644 --- a/examples/fourcastnet/train_finetune.py +++ b/examples/fourcastnet/train_finetune.py @@ -13,7 +13,6 @@ # limitations under the License. import functools -from os import path as osp from typing import Tuple import h5py @@ -24,7 +23,6 @@ import examples.fourcastnet.utils as fourcast_utils import ppsci -from ppsci.utils import logger def get_vis_data( @@ -56,12 +54,6 @@ def get_vis_data( def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.set_random_seed(cfg.seed) - - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - # set training hyper-parameters output_keys = tuple(f"output_{i}" for i in range(cfg.TRAIN.num_timestamps)) @@ -123,11 +115,6 @@ def train(cfg: DictConfig): "num_label_timestamps": cfg.TRAIN.num_timestamps, "training": False, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, } @@ -177,9 +164,8 @@ def train(cfg: DictConfig): constraint, cfg.output_dir, optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=ITERS_PER_EPOCH, eval_during_train=True, validator=validator, pretrained_model_path=cfg.TRAIN.pretrained_model_path, @@ -193,11 +179,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - # set testing hyper-parameters output_keys = tuple(f"output_{i}" for i in range(cfg.EVAL.num_timestamps)) @@ -238,11 +219,6 @@ def evaluate(cfg: DictConfig): "training": False, "stride": 8, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, } @@ -321,12 +297,9 @@ def output_wind_func(d, var_name, data_mean, data_std): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) solver.eval() # visualize prediction from pretrained_model_path diff --git a/examples/fourcastnet/train_precip.py b/examples/fourcastnet/train_precip.py index d7ea5ad0d..7ca544bec 100644 --- a/examples/fourcastnet/train_precip.py +++ b/examples/fourcastnet/train_precip.py @@ -13,7 +13,6 @@ # limitations under the License. 
import functools -import os.path as osp from typing import Tuple import h5py @@ -24,7 +23,6 @@ import examples.fourcastnet.utils as fourcast_utils import ppsci -from ppsci.utils import logger def get_vis_data( @@ -58,11 +56,6 @@ def get_vis_data( def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", f"{cfg.output_dir}/train.log", "info") - wind_data_mean, wind_data_std = fourcast_utils.get_mean_std( cfg.WIND_MEAN_PATH, cfg.WIND_STD_PATH, cfg.VARS_CHANNEL ) @@ -126,11 +119,6 @@ def train(cfg: DictConfig): "transforms": transforms, "training": False, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, } @@ -188,11 +176,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - # set testing hyper-parameters output_keys = tuple(f"output_{i}" for i in range(cfg.EVAL.num_timestamps)) @@ -243,11 +226,6 @@ def evaluate(cfg: DictConfig): "transforms": transforms, "training": False, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, } # set metirc @@ -318,12 +296,9 @@ def output_precip_func(d, var_name): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) solver.eval() # visualize prediction diff --git a/examples/fourcastnet/train_pretrain.py b/examples/fourcastnet/train_pretrain.py index f6699612b..4c7cce976 100644 --- a/examples/fourcastnet/train_pretrain.py +++ b/examples/fourcastnet/train_pretrain.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
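A second repeated deletion in these files is the explicit sampler block in eval dataloader configs. Dropping `"sampler": {"name": "BatchSampler", "drop_last": False, "shuffle": False}` relies on the assumption, consistent across these hunks, that the data loading path falls back to exactly that non-shuffling, non-dropping BatchSampler when no sampler is configured. A before/after sketch with illustrative dataset keys (`input_dict`/`label_dict` are placeholders for the arrays each example prepares):

```python
# Before: the sampler spelled out explicitly.
eval_dataloader_cfg = {
    "dataset": {"name": "NamedArrayDataset", "input": input_dict, "label": label_dict},
    "sampler": {"name": "BatchSampler", "drop_last": False, "shuffle": False},
    "batch_size": cfg.EVAL.batch_size,
}

# After: same behavior from the framework default (assumed here), so the
# block is simply removed.
eval_dataloader_cfg = {
    "dataset": {"name": "NamedArrayDataset", "input": input_dict, "label": label_dict},
    "batch_size": cfg.EVAL.batch_size,
}
```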
-from os import path as osp import hydra import numpy as np @@ -21,7 +20,6 @@ import examples.fourcastnet.utils as fourcast_utils import ppsci -from ppsci.utils import logger def get_data_stat(cfg: DictConfig): @@ -38,11 +36,6 @@ def get_data_stat(cfg: DictConfig): def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - data_mean, data_std = fourcast_utils.get_mean_std( cfg.DATA_MEAN_PATH, cfg.DATA_STD_PATH, cfg.VARS_CHANNEL ) @@ -119,11 +112,6 @@ def train(cfg: DictConfig): "transforms": transforms, "training": False, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, } @@ -170,7 +158,6 @@ def train(cfg: DictConfig): cfg.TRAIN.epochs, ITERS_PER_EPOCH, eval_during_train=True, - seed=cfg.seed, validator=validator, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, @@ -182,11 +169,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - data_mean, data_std = fourcast_utils.get_mean_std( cfg.DATA_MEAN_PATH, cfg.DATA_STD_PATH, cfg.VARS_CHANNEL ) @@ -214,11 +196,6 @@ def evaluate(cfg: DictConfig): "transforms": transforms, "training": False, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, } @@ -251,13 +228,8 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate solver.eval() diff --git a/examples/fpde/conf/fractional_poisson_2d.yaml b/examples/fpde/conf/fractional_poisson_2d.yaml index c0b657b82..9a0564d6e 100644 --- a/examples/fpde/conf/fractional_poisson_2d.yaml +++ b/examples/fpde/conf/fractional_poisson_2d.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working direcotry unchaned - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/fsi/conf/viv.yaml b/examples/fsi/conf/viv.yaml index 566769d21..56037e231 100644 --- a/examples/fsi/conf/viv.yaml +++ b/examples/fsi/conf/viv.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: diff --git a/examples/gpinn/conf/poisson_1d.yaml b/examples/gpinn/conf/poisson_1d.yaml index 0bab9662f..3125b8d08 100644 --- a/examples/gpinn/conf/poisson_1d.yaml +++ b/examples/gpinn/conf/poisson_1d.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: 
eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/gpinn/poisson_1d.py b/examples/gpinn/poisson_1d.py index a8cb3da1b..8e43baca4 100644 --- a/examples/gpinn/poisson_1d.py +++ b/examples/gpinn/poisson_1d.py @@ -26,7 +26,6 @@ import ppsci from ppsci.autodiff import jacobian -from ppsci.utils import logger class gPINN1D(ppsci.equation.PDE): @@ -55,11 +54,6 @@ def __init__(self, invar: str, outvar: str): def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -123,7 +117,6 @@ def u_solution(in_): "dataset": "NamedArrayDataset", "total_size": cfg.NPOINT_PDE_EVAL, "batch_size": cfg.EVAL.batch_size.l2rel_validator, - "sampler": {"name": "BatchSampler"}, }, ppsci.loss.MSELoss("mean"), evenly=True, @@ -136,18 +129,10 @@ def u_solution(in_): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, - geom=geom, validator=validator, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model solver.train() @@ -214,11 +199,6 @@ def du_x(x: np.ndarray) -> np.ndarray: def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -253,7 +233,6 @@ def u_solution(in_): "dataset": "NamedArrayDataset", "total_size": cfg.NPOINT_PDE, "batch_size": cfg.EVAL.batch_size.l2rel_validator, - "sampler": {"name": "BatchSampler"}, }, ppsci.loss.MSELoss("mean"), evenly=True, @@ -265,10 +244,8 @@ def u_solution(in_): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - geom=geom, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) # evaluate after finished training solver.eval() diff --git a/examples/heat_exchanger/conf/heat_exchanger.yaml b/examples/heat_exchanger/conf/heat_exchanger.yaml index 0ad2e5a7e..76d648351 100644 --- a/examples/heat_exchanger/conf/heat_exchanger.yaml +++ b/examples/heat_exchanger/conf/heat_exchanger.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working 
directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/heat_exchanger/heat_exchanger.py b/examples/heat_exchanger/heat_exchanger.py index 2479aa4f3..3b5e739d7 100644 --- a/examples/heat_exchanger/heat_exchanger.py +++ b/examples/heat_exchanger/heat_exchanger.py @@ -13,8 +13,6 @@ # limitations under the License. -from os import path as osp - import hydra import matplotlib.pyplot as plt import numpy as np @@ -25,11 +23,6 @@ def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set model model = ppsci.arch.HEDeepONets(**cfg.MODEL) @@ -293,11 +286,6 @@ def train(cfg: DictConfig): "label": test_bc_label, }, "batch_size": cfg.NTIME, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, }, ppsci.loss.MSELoss("mean"), output_expr={"T_h": lambda out: out["T_h"] - cfg.T_hin}, @@ -312,11 +300,6 @@ def train(cfg: DictConfig): "label": test_bc_label, }, "batch_size": cfg.NTIME, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, }, ppsci.loss.MSELoss("mean"), output_expr={"T_h": lambda out: out["T_c"] - cfg.T_cin}, @@ -331,11 +314,6 @@ def train(cfg: DictConfig): "label": test_interior_label, }, "batch_size": cfg.NTIME, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, }, ppsci.loss.MSELoss("mean"), output_expr=equation["heat_exchanger"].equations, @@ -352,16 +330,10 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, - geom=geom, validator=validator, + cfg=cfg, ) # train model solver.train() @@ -378,11 +350,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set model model = ppsci.arch.HEDeepONets(**cfg.MODEL) @@ -463,11 +430,6 @@ def evaluate(cfg: DictConfig): "label": test_bc_label, }, "batch_size": cfg.NTIME, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, }, ppsci.loss.MSELoss("mean"), output_expr={ @@ -484,11 +446,6 @@ def evaluate(cfg: DictConfig): "label": test_bc_label, }, "batch_size": cfg.NTIME, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, }, ppsci.loss.MSELoss("mean"), output_expr={ @@ -505,11 +462,6 @@ def evaluate(cfg: DictConfig): "label": test_interior_label, }, "batch_size": cfg.NTIME, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, }, ppsci.loss.MSELoss("mean"), output_expr=equation["heat_exchanger"].equations, @@ -525,11 +477,9 @@ def evaluate(cfg: DictConfig): # directly evaluate pretrained model(optional) solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, equation=equation, - geom=geom, validator=validator, - 
pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() diff --git a/examples/heat_pinn/conf/heat_pinn.yaml b/examples/heat_pinn/conf/heat_pinn.yaml index 5bf4032e4..377e14471 100644 --- a/examples/heat_pinn/conf/heat_pinn.yaml +++ b/examples/heat_pinn/conf/heat_pinn.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/heat_pinn/heat_pinn.py b/examples/heat_pinn/heat_pinn.py index cfdf0b4b4..4182fcecc 100644 --- a/examples/heat_pinn/heat_pinn.py +++ b/examples/heat_pinn/heat_pinn.py @@ -93,12 +93,6 @@ def plot(input_data, N_EVAL, pinn_output, fdm_output, cfg): def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # set output directory - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -185,17 +179,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - seed=cfg.seed, + optimizer=optimizer, equation=equation, - geom=geom, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model solver.train() @@ -213,12 +199,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # set output directory - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -228,10 +208,7 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - seed=cfg.seed, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) # begin eval N_EVAL = 100 diff --git a/examples/hpinns/conf/hpinns.yaml b/examples/hpinns/conf/hpinns.yaml index 23d81ee1b..71a92cc36 100644 --- a/examples/hpinns/conf/hpinns.yaml +++ b/examples/hpinns/conf/hpinns.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: 
ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/hpinns/holography.py b/examples/hpinns/holography.py index 290f2b3c2..144749f29 100644 --- a/examples/hpinns/holography.py +++ b/examples/hpinns/holography.py @@ -16,7 +16,6 @@ This module is heavily adapted from https://github.com/lululxvi/hpinn """ -from os import path as osp import functions as func_module import hydra @@ -35,10 +34,6 @@ def train(cfg: DictConfig): # open FLAG for higher order differential operator paddle.framework.core.set_prim_eager_enabled(True) - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - model_re = ppsci.arch.MLP(**cfg.MODEL.re_net) model_im = ppsci.arch.MLP(**cfg.MODEL.im_net) model_eps = ppsci.arch.MLP(**cfg.MODEL.eps_net) @@ -184,14 +179,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model_list, constraint, - cfg.output_dir, - optimizer_adam, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_adam, validator=validator, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model @@ -209,14 +199,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model_list, constraint, - cfg.output_dir, - optimizer_lbfgs, - None, - cfg.TRAIN.epochs_lbfgs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_lbfgs, validator=validator, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model @@ -259,14 +244,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model_list, constraint, - cfg.output_dir, - optimizer_lbfgs, - None, - cfg.TRAIN.epochs_lbfgs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_lbfgs, validator=validator, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model @@ -296,10 +276,6 @@ def evaluate(cfg: DictConfig): # open FLAG for higher order differential operator paddle.framework.core.set_prim_eager_enabled(True) - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - model_re = ppsci.arch.MLP(**cfg.MODEL.re_net) model_im = ppsci.arch.MLP(**cfg.MODEL.im_net) model_eps = ppsci.arch.MLP(**cfg.MODEL.eps_net) @@ -399,10 +375,8 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model_list, - output_dir=cfg.output_dir, - seed=cfg.seed, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) # evaluate diff --git a/examples/ide/conf/volterra_ide.yaml b/examples/ide/conf/volterra_ide.yaml index 1670b7352..cda361ea5 100644 --- a/examples/ide/conf/volterra_ide.yaml +++ b/examples/ide/conf/volterra_ide.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + 
_target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/ide/volterra_ide.py b/examples/ide/volterra_ide.py index 2f4c473ed..8f4e0c1a4 100644 --- a/examples/ide/volterra_ide.py +++ b/examples/ide/volterra_ide.py @@ -26,16 +26,9 @@ import ppsci from ppsci.autodiff import jacobian -from ppsci.utils import logger def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # set output directory - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -164,18 +157,10 @@ def u_solution_func(in_): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, - geom=geom, validator=validator, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model solver.train() @@ -195,12 +180,6 @@ def u_solution_func(in_): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # set output directory - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -231,11 +210,8 @@ def u_solution_func(in_) -> np.ndarray: # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - geom=geom, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate model solver.eval() @@ -261,7 +237,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/laplace/conf/laplace2d.yaml b/examples/laplace/conf/laplace2d.yaml index 088f71975..20591a6f3 100644 --- a/examples/laplace/conf/laplace2d.yaml +++ b/examples/laplace/conf/laplace2d.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/laplace/laplace2d.py b/examples/laplace/laplace2d.py index 39d6f959a..690e5ae4d 100644 --- a/examples/laplace/laplace2d.py +++ b/examples/laplace/laplace2d.py @@ -106,16 +106,11 @@ def u_solution_func(out): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, + 
cfg=cfg, ) # train model solver.train() @@ -178,13 +173,10 @@ def u_solution_func(out): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - seed=cfg.seed, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() # visualize prediction @@ -198,7 +190,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/ldc/conf/ldc2d_steady_Re10.yaml b/examples/ldc/conf/ldc2d_steady_Re10.yaml index f9bc05f44..cd877a47f 100644 --- a/examples/ldc/conf/ldc2d_steady_Re10.yaml +++ b/examples/ldc/conf/ldc2d_steady_Re10.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,14 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/ldc/conf/ldc2d_unsteady_Re10.yaml b/examples/ldc/conf/ldc2d_unsteady_Re10.yaml index 577dea688..acdd9c0bd 100644 --- a/examples/ldc/conf/ldc2d_unsteady_Re10.yaml +++ b/examples/ldc/conf/ldc2d_unsteady_Re10.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,14 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/ldc/ldc2d_steady_Re10.py b/examples/ldc/ldc2d_steady_Re10.py index 3c6507932..c36e8e435 100644 --- a/examples/ldc/ldc2d_steady_Re10.py +++ b/examples/ldc/ldc2d_steady_Re10.py @@ -11,22 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from os import path as osp import hydra import numpy as np from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -121,7 +114,6 @@ def train(cfg: DictConfig): "dataset": "NamedArrayDataset", "total_size": NPOINT_EVAL, "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, }, ppsci.loss.MSELoss("sum"), evenly=True, @@ -146,18 +138,11 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model solver.train() @@ -168,11 +153,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -198,7 +178,6 @@ def evaluate(cfg: DictConfig): "dataset": "NamedArrayDataset", "total_size": NPOINT_EVAL, "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, }, ppsci.loss.MSELoss("sum"), evenly=True, @@ -222,12 +201,10 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() # visualize prediction for pretrained model(optional) diff --git a/examples/ldc/ldc2d_unsteady_Re10.py b/examples/ldc/ldc2d_unsteady_Re10.py index aeb88868c..27f83879b 100644 --- a/examples/ldc/ldc2d_unsteady_Re10.py +++ b/examples/ldc/ldc2d_unsteady_Re10.py @@ -11,22 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from os import path as osp import hydra import numpy as np from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -142,7 +135,6 @@ def train(cfg: DictConfig): "dataset": "NamedArrayDataset", "total_size": NPOINT_EVAL, "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, }, ppsci.loss.MSELoss("sum"), evenly=True, @@ -189,17 +181,11 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, + cfg=cfg, ) # train model solver.train() @@ -210,11 +196,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -250,7 +231,6 @@ def evaluate(cfg: DictConfig): "dataset": "NamedArrayDataset", "total_size": NPOINT_EVAL, "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, }, ppsci.loss.MSELoss("sum"), evenly=True, @@ -296,12 +276,10 @@ def evaluate(cfg: DictConfig): # directly evaluate pretrained model(optional) solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() # visualize prediction for pretrained model(optional) diff --git a/examples/lorenz/conf/enn.yaml b/examples/lorenz/conf/enn.yaml index 6004bc04e..b3c6cd811 100644 --- a/examples/lorenz/conf/enn.yaml +++ b/examples/lorenz/conf/enn.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/lorenz/conf/transformer.yaml b/examples/lorenz/conf/transformer.yaml index 35cb8aeae..ee3a20642 100644 --- a/examples/lorenz/conf/transformer.yaml +++ b/examples/lorenz/conf/transformer.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,16 +15,9 @@ hydra: job: name: ${mode} # 
name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq - - EMBEDDING_MODEL_PATH + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/lorenz/train_enn.py b/examples/lorenz/train_enn.py index d3450e297..c7c40a6af 100644 --- a/examples/lorenz/train_enn.py +++ b/examples/lorenz/train_enn.py @@ -18,15 +18,12 @@ # This file is for step1: training an embedding model. # This file is based on PaddleScience/ppsci API. -from os import path as osp - import hydra import numpy as np import paddle from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def get_mean_std(data: np.ndarray): @@ -40,11 +37,6 @@ def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e4 * cfg.TRAIN_BLOCK_SIZE) regularization_key = "k_matrix" # manually build constraint(s) @@ -121,11 +113,6 @@ def train(cfg: DictConfig): key: value for key, value in zip(cfg.MODEL.output_keys, weights) }, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } @@ -144,9 +131,8 @@ def train(cfg: DictConfig): constraint, cfg.output_dir, optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=ITERS_PER_EPOCH, eval_during_train=True, validator=validator, ) @@ -157,11 +143,6 @@ def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e4 * cfg.TRAIN_BLOCK_SIZE) regularization_key = "k_matrix" # manually build constraint(s) @@ -223,11 +204,6 @@ def evaluate(cfg: DictConfig): key: value for key, value in zip(cfg.MODEL.output_keys, weights) }, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } @@ -242,9 +218,8 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() diff --git a/examples/lorenz/train_transformer.py b/examples/lorenz/train_transformer.py index a68c404b9..944add966 100644 --- a/examples/lorenz/train_transformer.py +++ b/examples/lorenz/train_transformer.py @@ -56,11 +56,6 @@ def train(cfg: DictConfig): # train time-series: 2048 time-steps: 256 block-size: 64 stride: 64 # valid time-series: 64 time-steps: 1024 block-size: 256 stride: 1024 # test time-series: 256 time-steps: 1024 - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) output_transform = OutputTransform(embedding_model) @@ -117,11 +112,6 @@ def train(cfg: DictConfig): "stride": 1024, "embedding_model": 
embedding_model, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } @@ -159,9 +149,8 @@ def train(cfg: DictConfig): constraint, cfg.output_dir, optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=ITERS_PER_EPOCH, eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, validator=validator, @@ -196,11 +185,6 @@ def evaluate(cfg: DictConfig): "stride": 1024, "embedding_model": embedding_model, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } @@ -235,10 +219,9 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() # visualize prediction for pretrained model(optional) @@ -259,7 +242,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/neuraloperator/conf/sfno_swe_pretrain.yaml b/examples/neuraloperator/conf/sfno_swe_pretrain.yaml index 09ce6522b..f0a8b811f 100644 --- a/examples/neuraloperator/conf/sfno_swe_pretrain.yaml +++ b/examples/neuraloperator/conf/sfno_swe_pretrain.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/neuraloperator/conf/tfno_darcyflow_pretrain.yaml b/examples/neuraloperator/conf/tfno_darcyflow_pretrain.yaml index b6f7aafd5..104cb6d78 100644 --- a/examples/neuraloperator/conf/tfno_darcyflow_pretrain.yaml +++ b/examples/neuraloperator/conf/tfno_darcyflow_pretrain.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/neuraloperator/conf/uno_darcyflow_pretrain.yaml b/examples/neuraloperator/conf/uno_darcyflow_pretrain.yaml index 72f20b410..25cfab256 100644 --- a/examples/neuraloperator/conf/uno_darcyflow_pretrain.yaml +++ b/examples/neuraloperator/conf/uno_darcyflow_pretrain.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: 
train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/neuraloperator/train_sfno.py b/examples/neuraloperator/train_sfno.py index 50843c00e..92fe4ad52 100644 --- a/examples/neuraloperator/train_sfno.py +++ b/examples/neuraloperator/train_sfno.py @@ -53,11 +53,6 @@ def train(cfg: DictConfig): "test_resolutions": cfg.DATASET.test_resolutions, "data_split": "test_32x64", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -72,11 +67,6 @@ def train(cfg: DictConfig): "test_resolutions": cfg.DATASET.test_resolutions, "data_split": "test_64x128", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -140,7 +130,6 @@ def train(cfg: DictConfig): cfg.TRAIN.epochs, ITERS_PER_EPOCH, eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, validator=validator, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, @@ -164,11 +153,6 @@ def evaluate(cfg: DictConfig): "test_resolutions": cfg.DATASET.test_resolutions, "data_split": "test_32x64", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -183,11 +167,6 @@ def evaluate(cfg: DictConfig): "test_resolutions": cfg.DATASET.test_resolutions, "data_split": "test_64x128", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -223,7 +202,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, diff --git a/examples/neuraloperator/train_tfno.py b/examples/neuraloperator/train_tfno.py index 1612ae548..90c33b461 100644 --- a/examples/neuraloperator/train_tfno.py +++ b/examples/neuraloperator/train_tfno.py @@ -68,11 +68,6 @@ def train(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, "data_split": "test_16x16", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -92,11 +87,6 @@ def train(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, "data_split": "test_32x32", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -166,7 +156,6 @@ def train(cfg: DictConfig): cfg.TRAIN.epochs, ITERS_PER_EPOCH, eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, validator=validator, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, @@ -195,11 +184,6 @@ def evaluate(cfg: DictConfig): 
"channel_dim": cfg.DATASET.channel_dim, "data_split": "test_16x16", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -219,11 +203,6 @@ def evaluate(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, "data_split": "test_32x32", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -271,7 +250,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, diff --git a/examples/neuraloperator/train_uno.py b/examples/neuraloperator/train_uno.py index f73c2a103..9770bfbb7 100644 --- a/examples/neuraloperator/train_uno.py +++ b/examples/neuraloperator/train_uno.py @@ -68,11 +68,6 @@ def train(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, "data_split": "test_16x16", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -92,11 +87,6 @@ def train(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, "data_split": "test_32x32", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -166,7 +156,6 @@ def train(cfg: DictConfig): cfg.TRAIN.epochs, ITERS_PER_EPOCH, eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, validator=validator, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, @@ -195,11 +184,6 @@ def evaluate(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, "data_split": "test_16x16", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -219,11 +203,6 @@ def evaluate(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, "data_split": "test_32x32", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -271,7 +250,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, diff --git a/examples/nowcastnet/conf/nowcastnet.yaml b/examples/nowcastnet/conf/nowcastnet.yaml index 088a4ee4e..52b72b0f3 100644 --- a/examples/nowcastnet/conf/nowcastnet.yaml +++ b/examples/nowcastnet/conf/nowcastnet.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,17 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - INFER.export_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for 
multirun dir: ${hydra.run.dir} diff --git a/examples/nowcastnet/nowcastnet.py b/examples/nowcastnet/nowcastnet.py index 9156907ac..c8374a3a8 100644 --- a/examples/nowcastnet/nowcastnet.py +++ b/examples/nowcastnet/nowcastnet.py @@ -8,7 +8,6 @@ from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): @@ -16,11 +15,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - if cfg.CASE_TYPE == "large": dataset_path = cfg.LARGE_DATASET_PATH model_cfg = cfg.MODEL.large @@ -57,7 +51,7 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, output_dir=output_dir, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) for batch_id, test_ims in enumerate(test_data_loader): diff --git a/examples/nsfnet/VP_NSFNet1.py b/examples/nsfnet/VP_NSFNet1.py index 8e917ab6f..c2f3ee942 100644 --- a/examples/nsfnet/VP_NSFNet1.py +++ b/examples/nsfnet/VP_NSFNet1.py @@ -66,7 +66,6 @@ def generate_data(N_TRAIN, lam, seed): def train(cfg: DictConfig): OUTPUT_DIR = cfg.output_dir - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") # set random seed for reproducibility SEED = cfg.seed @@ -110,11 +109,6 @@ def train(cfg: DictConfig): }, "batch_size": NB_TRAIN, "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } valida_dataloader_cfg = { @@ -125,11 +119,6 @@ def train(cfg: DictConfig): }, "total_size": u_star.shape[0], "batch_size": u_star.shape[0], - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } geom = ppsci.geometry.PointCloud({"x": x_train, "y": y_train}, ("x", "y")) @@ -197,19 +186,10 @@ def train(cfg: DictConfig): model=model, constraint=constraint, optimizer=optimizer, - epochs=EPOCHS, - lr_scheduler=lr_scheduler, - iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=False, - log_freq=cfg.log_freq, - eval_freq=cfg.eval_freq, - seed=SEED, equation=equation, - geom=geom, validator=validator, visualizer=None, - eval_with_no_grad=False, - output_dir=OUTPUT_DIR, + cfg=cfg, ) # train model @@ -230,21 +210,13 @@ def train(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( - model=model, - constraint=constraint, + model, + constraint, optimizer=optimizer, - epochs=EPOCHS, - iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=False, - log_freq=2000, - eval_freq=2000, - seed=SEED, equation=equation, - geom=geom, validator=validator, visualizer=None, - eval_with_no_grad=False, - output_dir=OUTPUT_DIR, + cfg=cfg, ) # train model solver.train() @@ -254,29 +226,15 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - OUTPUT_DIR = cfg.output_dir - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") - - # set random seed for reproducibility - SEED = cfg.seed - ppsci.utils.misc.set_random_seed(SEED) - # set model model = ppsci.arch.MLP(**cfg.MODEL) ppsci.utils.load_pretrain(model, cfg.pretrained_model_path) - # set the number of residual samples - N_TRAIN = cfg.ntrain - # set the Reynolds number and the corresponding lambda which is the parameter in the exact solution. 
Re = cfg.re lam = 0.5 * Re - np.sqrt(0.25 * (Re**2) + 4 * (np.pi**2)) - x_train = (np.random.rand(N_TRAIN, 1) - 1 / 3) * 3 / 2 - y_train = (np.random.rand(N_TRAIN, 1) - 1 / 4) * 2 - # generate test data - np.random.seed(SEED) x_star = ((np.random.rand(1000, 1) - 1 / 3) * 3 / 2).astype("float32") y_star = ((np.random.rand(1000, 1) - 1 / 4) * 2).astype("float32") u_star = 1 - np.exp(lam * x_star) * np.cos(2 * np.pi * y_star) @@ -291,15 +249,7 @@ def evaluate(cfg: DictConfig): }, "total_size": u_star.shape[0], "batch_size": u_star.shape[0], - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } - - geom = ppsci.geometry.PointCloud({"x": x_train, "y": y_train}, ("x", "y")) - # set equation constraint s.t. ||F(u)|| equation = { "NavierStokes": ppsci.equation.NavierStokes( @@ -326,8 +276,8 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, equation=equation, - geom=geom, validator=validator, + cfg=cfg, ) # eval model diff --git a/examples/nsfnet/VP_NSFNet2.py b/examples/nsfnet/VP_NSFNet2.py index 94419e573..e23189306 100644 --- a/examples/nsfnet/VP_NSFNet2.py +++ b/examples/nsfnet/VP_NSFNet2.py @@ -7,7 +7,6 @@ from scipy.interpolate import griddata import ppsci -from ppsci.utils import logger @hydra.main(version_base=None, config_path="./conf", config_name="VP_NSFNet2.yaml") @@ -112,12 +111,7 @@ def load_data(path, N_TRAIN, NB_TRAIN, N0_TRAIN): def train(cfg: DictConfig): - OUTPUT_DIR = cfg.output_dir - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") - # set random seed for reproducibility - SEED = cfg.seed - ppsci.utils.misc.set_random_seed(SEED) ITERS_PER_EPOCH = cfg.iters_per_epoch # set model @@ -162,11 +156,6 @@ def train(cfg: DictConfig): }, "batch_size": NB_TRAIN, "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } train_dataloader_cfg_0 = { @@ -177,11 +166,6 @@ def train(cfg: DictConfig): }, "batch_size": N0_TRAIN, "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } valida_dataloader_cfg = { @@ -192,11 +176,6 @@ def train(cfg: DictConfig): }, "total_size": u_star.shape[0], "batch_size": u_star.shape[0], - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } geom = ppsci.geometry.PointCloud( @@ -270,24 +249,15 @@ def train(cfg: DictConfig): )() optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) - logger.init_logger("ppsci", f"{OUTPUT_DIR}/eval.log", "info") # initialize solver solver = ppsci.solver.Solver( - model=model, - constraint=constraint, + model, + constraint, optimizer=optimizer, epochs=EPOCHS, - lr_scheduler=lr_scheduler, - iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=True, - log_freq=cfg.log_freq, - eval_freq=cfg.eval_freq, - seed=SEED, equation=equation, - geom=geom, validator=validator, - visualizer=None, - eval_with_no_grad=False, + cfg=cfg, ) # train model solver.train() @@ -300,19 +270,11 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): OUTPUT_DIR = cfg.output_dir - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") - - # set random seed for reproducibility - SEED = cfg.seed - ppsci.utils.misc.set_random_seed(SEED) # set model model = ppsci.arch.MLP(**cfg.MODEL) ppsci.utils.load_pretrain(model, cfg.pretrained_model_path) - # set the number of residual samples - N_TRAIN = cfg.ntrain - data = scipy.io.loadmat(cfg.data_dir) U_star = data["U_star"].astype("float32") # N x 2 x T @@ -321,38 
+283,10 @@ def evaluate(cfg: DictConfig): X_star = data["X_star"].astype("float32") # N x 2 N = X_star.shape[0] - T = t_star.shape[0] # rearrange data - XX = np.tile(X_star[:, 0:1], (1, T)) # N x T - YY = np.tile(X_star[:, 1:2], (1, T)) # N x T TT = np.tile(t_star, (1, N)).T # N x T - UU = U_star[:, 0, :] # N x T - VV = U_star[:, 1, :] # N x T - PP = P_star # N x T - - x = XX.flatten()[:, None] # NT x 1 - y = YY.flatten()[:, None] # NT x 1 - t = TT.flatten()[:, None] # NT x 1 - - u = UU.flatten()[:, None] # NT x 1 - v = VV.flatten()[:, None] # NT x 1 - p = PP.flatten()[:, None] # NT x 1 - - data1 = np.concatenate([x, y, t, u, v, p], 1) - data2 = data1[:, :][data1[:, 2] <= 7] - data3 = data2[:, :][data2[:, 0] >= 1] - data4 = data3[:, :][data3[:, 0] <= 8] - data5 = data4[:, :][data4[:, 1] >= -2] - data_domain = data5[:, :][data5[:, 1] <= 2] - - idx = np.random.choice(data_domain.shape[0], N_TRAIN, replace=False) - - x_train = data_domain[idx, 0].reshape(data_domain[idx, 0].shape[0], 1) - y_train = data_domain[idx, 1].reshape(data_domain[idx, 1].shape[0], 1) - t_train = data_domain[idx, 2].reshape(data_domain[idx, 2].shape[0], 1) - snap = np.array([0]) x_star = X_star[:, 0:1] y_star = X_star[:, 1:2] @@ -370,17 +304,7 @@ def evaluate(cfg: DictConfig): }, "total_size": u_star.shape[0], "batch_size": u_star.shape[0], - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } - - geom = ppsci.geometry.PointCloud( - {"x": x_train, "y": y_train, "t": t_train}, ("x", "y", "t") - ) - # set equation constraint s.t. ||F(u)|| equation = { "NavierStokes": ppsci.equation.NavierStokes(nu=0.01, rho=1.0, dim=2, time=True), @@ -404,8 +328,8 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, equation=equation, - geom=geom, validator=validator, + cfg=cfg, ) # eval diff --git a/examples/nsfnet/VP_NSFNet3.py b/examples/nsfnet/VP_NSFNet3.py index d65995bfe..a717ecd0c 100644 --- a/examples/nsfnet/VP_NSFNet3.py +++ b/examples/nsfnet/VP_NSFNet3.py @@ -164,7 +164,6 @@ def main(cfg: DictConfig): def train(cfg: DictConfig): OUTPUT_DIR = cfg.output_dir - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") # set random seed for reproducibility SEED = cfg.seed @@ -222,11 +221,6 @@ def train(cfg: DictConfig): }, "batch_size": NB_TRAIN, "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } train_dataloader_cfg_0 = { @@ -237,11 +231,6 @@ def train(cfg: DictConfig): }, "batch_size": N0_TRAIN, "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } valida_dataloader_cfg = { @@ -252,11 +241,6 @@ def train(cfg: DictConfig): }, "total_size": u_star.shape[0], "batch_size": u_star.shape[0], - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } geom = ppsci.geometry.PointCloud( {"x": x_train, "y": y_train, "z": z_train, "t": t_train}, ("x", "y", "z", "t") @@ -332,21 +316,12 @@ def train(cfg: DictConfig): logger.init_logger("ppsci", f"{OUTPUT_DIR}/eval.log", "info") # initialize solver solver = ppsci.solver.Solver( - model=model, - constraint=constraint, + model, + constraint, optimizer=optimizer, - epochs=EPOCHS, - lr_scheduler=lr_scheduler, - iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=True, - log_freq=cfg.log_freq, - eval_freq=cfg.eval_freq, - seed=SEED, equation=equation, - geom=geom, validator=validator, - visualizer=None, - eval_with_no_grad=False, + cfg=cfg, ) # train model 
solver.train() @@ -358,30 +333,11 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): OUTPUT_DIR = cfg.output_dir - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") - - # set random seed for reproducibility - SEED = cfg.seed - ppsci.utils.misc.set_random_seed(SEED) # set model model = ppsci.arch.MLP(**cfg.MODEL) ppsci.utils.load_pretrain(model, cfg.pretrained_model_path) - # set the number of residual samples - N_TRAIN = cfg.ntrain - - # unsupervised part - xx = np.random.randint(31, size=N_TRAIN) / 15 - 1 - yy = np.random.randint(31, size=N_TRAIN) / 15 - 1 - zz = np.random.randint(31, size=N_TRAIN) / 15 - 1 - tt = np.random.randint(11, size=N_TRAIN) / 10 - - x_train = xx.reshape(xx.shape[0], 1).astype("float32") - y_train = yy.reshape(yy.shape[0], 1).astype("float32") - z_train = zz.reshape(zz.shape[0], 1).astype("float32") - t_train = tt.reshape(tt.shape[0], 1).astype("float32") - # test data x_star = ((np.random.rand(1000, 1) - 1 / 2) * 2).astype("float32") y_star = ((np.random.rand(1000, 1) - 1 / 2) * 2).astype("float32") @@ -400,16 +356,7 @@ def evaluate(cfg: DictConfig): }, "total_size": u_star.shape[0], "batch_size": u_star.shape[0], - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } - geom = ppsci.geometry.PointCloud( - {"x": x_train, "y": y_train, "z": z_train, "t": t_train}, ("x", "y", "z", "t") - ) - equation = { "NavierStokes": ppsci.equation.NavierStokes( nu=1.0 / cfg.re, rho=1.0, dim=3, time=True @@ -434,8 +381,8 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, equation=equation, - geom=geom, validator=validator, + cfg=cfg, ) # print the relative error diff --git a/examples/nsfnet/VP_NSFNet4.py b/examples/nsfnet/VP_NSFNet4.py index f60f76607..448178764 100644 --- a/examples/nsfnet/VP_NSFNet4.py +++ b/examples/nsfnet/VP_NSFNet4.py @@ -283,22 +283,12 @@ def train(cfg: DictConfig): optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) # initialize solver solver = ppsci.solver.Solver( - model=model, - constraint=constraint, - output_dir=cfg.output_dir, + model, + constraint, optimizer=optimizer, - lr_scheduler=lr_scheduler, - epochs=cfg.epochs, - iters_per_epoch=cfg.TRAIN.lr_scheduler.iters_per_epoch, - log_freq=cfg.TRAIN.log_freq, - save_freq=cfg.TRAIN.save_freq, - eval_freq=cfg.TRAIN.eval_freq, - eval_during_train=True, - seed=cfg.seed, equation=equation, - geom=geom, validator=validator, - eval_with_no_grad=cfg.TRAIN.eval_with_no_grad, + cfg=cfg, ) # train model solver.train() @@ -455,7 +445,8 @@ def export(cfg: DictConfig): # load pretrained model solver = ppsci.solver.Solver( - model=model, pretrained_model_path=cfg.INFER.pretrained_model_path + model=model, + cfg=cfg, ) # export models diff --git a/examples/nsfnet/conf/VP_NSFNet4.yaml b/examples/nsfnet/conf/VP_NSFNet4.yaml index 385179a45..ddc99a783 100644 --- a/examples/nsfnet/conf/VP_NSFNet4.yaml +++ b/examples/nsfnet/conf/VP_NSFNet4.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,55 +15,46 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - 
output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} -seed: 1234 + +mode: train output_dir: ${hydra:run.dir} +seed: 1234 data_dir: ./data/ -log_freq: 20 -MODEL: - input_keys: ["x", "y","z","t"] - output_keys: ["u", "v", "w","p"] - num_layers: 10 - hidden_size: 300 - activation: "tanh" - weight_norm: true -mode: train +log_freq: 5000 ntrain: 11333 nb_train: 2952 n0_train: 986 alpha: 100 beta: 100 re: 999.35 -epochs: 15250 + +MODEL: + input_keys: ["x", "y","z","t"] + output_keys: ["u", "v", "w","p"] + num_layers: 10 + hidden_size: 300 + activation: "tanh" + weight_norm: True TRAIN: - log_freq: 5000 + epochs: 15250 eval_freq: 5000 save_freq: 5000 - eval_with_no_grad: true + iters_per_epoch: 150 lr_scheduler: - epochs: 15250 + epochs: ${TRAIN.epochs} decay_epochs: [250, 4500, 5000, 5500] - iters_per_epoch: 150 values: [1e-3, 1e-4, 1e-5, 1e-6, 1e-7] + EVAL: pretrained_model_path: null eval_with_no_grad: true - INFER: pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/nsfnet/nsfnet4.pdparams export_path: ./inference/VP_NSFNet4 diff --git a/examples/operator_learning/conf/deeponet.yaml b/examples/operator_learning/conf/deeponet.yaml index b5b52b307..dbcb0d5b9 100644 --- a/examples/operator_learning/conf/deeponet.yaml +++ b/examples/operator_learning/conf/deeponet.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/operator_learning/deeponet.py b/examples/operator_learning/deeponet.py index 331b4428d..91ec47ae8 100644 --- a/examples/operator_learning/deeponet.py +++ b/examples/operator_learning/deeponet.py @@ -3,7 +3,6 @@ """ import os -from os import path as osp from typing import Callable from typing import Tuple @@ -18,11 +17,6 @@ def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set model model = ppsci.arch.DeepONet(**cfg.MODEL) @@ -71,18 +65,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - eval_freq=cfg.TRAIN.eval_freq, - log_freq=cfg.log_freq, - seed=cfg.seed, + optimizer=optimizer, validator=validator, - eval_during_train=cfg.TRAIN.eval_during_train, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model solver.train() @@ -96,11 +81,6 @@ def predict_func(input_dict): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set 
model model = ppsci.arch.DeepONet(**cfg.MODEL) @@ -125,11 +105,8 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, - None, - cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) solver.eval() diff --git a/examples/phycrnet/conf/burgers_equations.yaml b/examples/phycrnet/conf/burgers_equations.yaml index e3d95cf23..6ad02ba78 100644 --- a/examples/phycrnet/conf/burgers_equations.yaml +++ b/examples/phycrnet/conf/burgers_equations.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml b/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml index 281e8488b..031703617 100644 --- a/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml +++ b/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/phycrnet/conf/lambda_omega_RD_equation.yaml b/examples/phycrnet/conf/lambda_omega_RD_equation.yaml index b5d7aaa67..ad517c035 100644 --- a/examples/phycrnet/conf/lambda_omega_RD_equation.yaml +++ b/examples/phycrnet/conf/lambda_omega_RD_equation.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/phycrnet/main.py b/examples/phycrnet/main.py index ef0f8fab8..ce9017488 100644 --- 
a/examples/phycrnet/main.py +++ b/examples/phycrnet/main.py @@ -2,7 +2,6 @@ PhyCRNet for solving spatiotemporal PDEs Reference: https://github.com/isds-neu/PhyCRNet/ """ -from os import path as osp import functions import hydra @@ -11,15 +10,9 @@ from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set initial states for convlstm NUM_CONVLSTM = cfg.num_convlstm (h0, c0) = (paddle.randn((1, 128, 16, 16)), paddle.randn((1, 128, 16, 16))) @@ -114,14 +107,9 @@ def _transform_out(_in, _out): solver = ppsci.solver.Solver( model, constraint_pde, - cfg.output_dir, - optimizer, - scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, + optimizer=optimizer, validator=validator_pde, - eval_with_no_grad=cfg.TRAIN.eval_with_no_grad, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, + cfg=cfg, ) # train model @@ -132,11 +120,6 @@ def _transform_out(_in, _out): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set initial states for convlstm NUM_CONVLSTM = cfg.num_convlstm (h0, c0) = (paddle.randn((1, 128, 16, 16)), paddle.randn((1, 128, 16, 16))) diff --git a/examples/phygeonet/conf/heat_equation.yaml b/examples/phygeonet/conf/heat_equation.yaml index 63fdc4ebe..991a0fbdd 100644 --- a/examples/phygeonet/conf/heat_equation.yaml +++ b/examples/phygeonet/conf/heat_equation.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/phygeonet/conf/heat_equation_with_bc.yaml b/examples/phygeonet/conf/heat_equation_with_bc.yaml index af92466c7..136b95879 100644 --- a/examples/phygeonet/conf/heat_equation_with_bc.yaml +++ b/examples/phygeonet/conf/heat_equation_with_bc.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/phygeonet/heat_equation.py b/examples/phygeonet/heat_equation.py index ba7869b6e..b3c2c4335 100644 --- a/examples/phygeonet/heat_equation.py +++ 
b/examples/phygeonet/heat_equation.py @@ -84,10 +84,8 @@ def _transform_out( solver = ppsci.solver.Solver( model, sup_constraint, - cfg.output_dir, - optimizer, - epochs=cfg.epochs, - iters_per_epoch=iters_per_epoch, + optimizer=optimizer, + cfg=cfg, ) solver.train() solver.plot_loss_history() @@ -104,7 +102,7 @@ def evaluate(cfg: DictConfig): model = ppsci.arch.USCNN(**cfg.MODEL) solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.EVAL.pretrained_model_path, ### the path of the model + cfg=cfg, ) output_v = solver.predict({"coords": paddle.to_tensor(coords)}) output_v = output_v["output_v"] diff --git a/examples/phygeonet/heat_equation_with_bc.py b/examples/phygeonet/heat_equation_with_bc.py index 0af45be70..03f59b13f 100644 --- a/examples/phygeonet/heat_equation_with_bc.py +++ b/examples/phygeonet/heat_equation_with_bc.py @@ -89,10 +89,8 @@ def _transform_out( solver = ppsci.solver.Solver( model, sup_constraint, - cfg.output_dir, - optimizer, - epochs=cfg.epochs, - iters_per_epoch=iters_per_epoch, + optimizer=optimizer, + cfg=cfg, ) solver.train() @@ -109,7 +107,7 @@ def evaluate(cfg: DictConfig): coords = paddle.to_tensor(data["coords"]) solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.EVAL.pretrained_model_path, ### the path of the model + cfg=cfg, ) paras = paras.reshape([paras.shape[0], 1, paras.shape[1], paras.shape[2]]) diff --git a/examples/phylstm/conf/phylstm2.yaml b/examples/phylstm/conf/phylstm2.yaml index ff5eedde5..b3dc1ddd6 100644 --- a/examples/phylstm/conf/phylstm2.yaml +++ b/examples/phylstm/conf/phylstm2.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/phylstm/conf/phylstm3.yaml b/examples/phylstm/conf/phylstm3.yaml index 630e0c8c7..67326c3aa 100644 --- a/examples/phylstm/conf/phylstm3.yaml +++ b/examples/phylstm/conf/phylstm3.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/phylstm/phylstm2.py b/examples/phylstm/phylstm2.py index 38af11fca..50f19730f 100755 --- a/examples/phylstm/phylstm2.py +++ b/examples/phylstm/phylstm2.py @@ -16,7 +16,6 @@ Reference: 
https://github.com/zhry10/PhyLSTM.git """ -from os import path as osp import functions import hydra @@ -25,15 +24,9 @@ from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) ag_data = mat["input_tf"] # ag, ad, av u_data = mat["target_X_tf"] @@ -151,11 +144,6 @@ def train(cfg: DictConfig): "input": input_dict_val, "label": label_dict_val, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": 1, "num_workers": 0, }, @@ -178,17 +166,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint_pde, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - seed=cfg.seed, + optimizer=optimizer, validator=validator_pde, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model @@ -198,11 +178,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) ag_data = mat["input_tf"] # ag, ad, av u_data = mat["target_X_tf"] @@ -292,11 +267,6 @@ def evaluate(cfg: DictConfig): "input": input_dict_val, "label": label_dict_val, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": 1, "num_workers": 0, }, @@ -317,11 +287,8 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - seed=cfg.seed, validator=validator_pde, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate solver.eval() diff --git a/examples/phylstm/phylstm3.py b/examples/phylstm/phylstm3.py index 071ecbeed..3274ccc11 100755 --- a/examples/phylstm/phylstm3.py +++ b/examples/phylstm/phylstm3.py @@ -16,7 +16,6 @@ Reference: https://github.com/zhry10/PhyLSTM.git """ -from os import path as osp import functions import hydra @@ -25,15 +24,9 @@ from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) t = mat["time"] dt = 0.02 @@ -129,11 +122,6 @@ def train(cfg: DictConfig): "input": input_dict_train, "label": label_dict_train, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": 1, "num_workers": 0, }, @@ -159,11 +147,6 @@ def train(cfg: DictConfig): "input": input_dict_val, "label": label_dict_val, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": 1, "num_workers": 0, }, @@ -188,17 +171,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint_pde, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - seed=cfg.seed, + optimizer=optimizer, 
validator=validator_pde, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model @@ -208,11 +183,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) t = mat["time"] dt = 0.02 @@ -308,11 +278,6 @@ def evaluate(cfg: DictConfig): "input": input_dict_val, "label": label_dict_val, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": 1, "num_workers": 0, }, @@ -335,11 +300,8 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - seed=cfg.seed, validator=validator_pde, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate diff --git a/examples/pipe/conf/poiseuille_flow.yaml b/examples/pipe/conf/poiseuille_flow.yaml index c12105d07..6329081ca 100644 --- a/examples/pipe/conf/poiseuille_flow.yaml +++ b/examples/pipe/conf/poiseuille_flow.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/pipe/poiseuille_flow.py b/examples/pipe/poiseuille_flow.py index f9481bb2a..dfc2965ce 100644 --- a/examples/pipe/poiseuille_flow.py +++ b/examples/pipe/poiseuille_flow.py @@ -128,11 +128,6 @@ def output_trans_p(input, out): "num_workers": 1, "batch_size": cfg.TRAIN.batch_size.pde_constraint, "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "shuffle": False, - "drop_last": False, - }, }, loss=ppsci.loss.MSELoss("mean"), evenly=True, @@ -304,11 +299,6 @@ def forward(self, output_dict, label_dict): "weight": weight_dict_KL, } eval_cfg = { - "sampler": { - "name": "BatchSampler", - "shuffle": False, - "drop_last": False, - }, "batch_size": 2000, } eval_cfg["dataset"] = dataset_vel @@ -335,10 +325,8 @@ def forward(self, output_dict, label_dict): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) solver.eval() diff --git a/examples/rossler/conf/enn.yaml b/examples/rossler/conf/enn.yaml index 03f44484e..c2ab876b3 100644 --- a/examples/rossler/conf/enn.yaml +++ b/examples/rossler/conf/enn.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,9 @@ hydra: job: name: ${mode} 
# name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/rossler/conf/transformer.yaml b/examples/rossler/conf/transformer.yaml index 60ec738bf..92829a218 100644 --- a/examples/rossler/conf/transformer.yaml +++ b/examples/rossler/conf/transformer.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -15,6 +25,9 @@ hydra: - output_dir - log_freq - EMBEDDING_MODEL_PATH + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/rossler/train_enn.py b/examples/rossler/train_enn.py index c26dcb89e..dd60cd380 100644 --- a/examples/rossler/train_enn.py +++ b/examples/rossler/train_enn.py @@ -18,7 +18,6 @@ # This file is for step1: training an embedding model. # This file is based on PaddleScience/ppsci API. -from os import path as osp import hydra import numpy as np @@ -26,7 +25,6 @@ from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def get_mean_std(data: np.ndarray): @@ -44,11 +42,6 @@ def get_mean_std(data: np.ndarray): def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e3 * cfg.TRAIN_BLOCK_SIZE) regularization_key = "k_matrix" # manually build constraint(s) @@ -123,11 +116,6 @@ def train(cfg: DictConfig): key: value for key, value in zip(cfg.MODEL.output_keys, weights) }, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } @@ -143,13 +131,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=True, + optimizer=optimizer, validator=validator, + cfg=cfg, ) # train model solver.train() @@ -158,11 +142,6 @@ def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e3 * cfg.TRAIN_BLOCK_SIZE) regularization_key = "k_matrix" # manually build constraint(s) @@ -222,11 +201,6 @@ def evaluate(cfg: DictConfig): key: value for key, value in zip(cfg.MODEL.output_keys, weights) }, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } @@ -240,9 +214,8 @@ def evaluate(cfg: DictConfig): validator = {mse_validator.name: mse_validator} solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, )
solver.eval() diff --git a/examples/rossler/train_transformer.py b/examples/rossler/train_transformer.py index a58b8b8d2..8925a74ce 100644 --- a/examples/rossler/train_transformer.py +++ b/examples/rossler/train_transformer.py @@ -18,7 +18,6 @@ # This file is for step2: training a transformer model, based on frozen pretrained embedding model. # This file is based on PaddleScience/ppsci API. -from os import path as osp from typing import Dict import hydra @@ -27,7 +26,6 @@ import ppsci from ppsci.arch import base -from ppsci.utils import logger from ppsci.utils import save_load @@ -53,11 +51,6 @@ def __call__(self, x: Dict[str, paddle.Tensor]): def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) output_transform = OutputTransform(embedding_model) @@ -114,11 +107,6 @@ def train(cfg: DictConfig): "stride": 1024, "embedding_model": embedding_model, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } @@ -154,15 +142,10 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, validator=validator, visualizer=visualizer, + cfg=cfg, ) # train model solver.train() @@ -173,11 +156,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) output_transform = OutputTransform(embedding_model) @@ -195,11 +173,6 @@ def evaluate(cfg: DictConfig): "stride": 1024, "embedding_model": embedding_model, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } @@ -234,10 +207,9 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() # visualize prediction for pretrained model(optional) @@ -258,7 +230,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/shock_wave/conf/shock_wave_Ma0.728.yaml b/examples/shock_wave/conf/shock_wave_Ma0.728.yaml index e8ee30997..d4ca2c1c0 100644 --- a/examples/shock_wave/conf/shock_wave_Ma0.728.yaml +++ b/examples/shock_wave/conf/shock_wave_Ma0.728.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,17 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - 
TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - INFER.export_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/shock_wave/conf/shock_wave_Ma2.0.yaml b/examples/shock_wave/conf/shock_wave_Ma2.0.yaml index 80e8badaf..0051030bf 100644 --- a/examples/shock_wave/conf/shock_wave_Ma2.0.yaml +++ b/examples/shock_wave/conf/shock_wave_Ma2.0.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,17 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - INFER.export_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/shock_wave/shock_wave.py b/examples/shock_wave/shock_wave.py index 900b5edf7..ca2a3dda3 100644 --- a/examples/shock_wave/shock_wave.py +++ b/examples/shock_wave/shock_wave.py @@ -24,7 +24,6 @@ import ppsci from ppsci import equation from ppsci.autodiff import jacobian -from ppsci.utils import logger from ppsci.utils import misc @@ -245,12 +244,6 @@ def generate_bc_left_points( def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -403,18 +396,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - seed=cfg.seed, + optimizer=optimizer, equation=equation, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # HACK: Give entire solver to equation object for tracking run-time epoch # to compute factor `relu` dynamically.
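The recurring change in these hunks is an API migration: the Solver's long positional argument list (output_dir, optimizer, lr_scheduler, epochs, iters_per_epoch, plus assorted keyword flags) is replaced by `optimizer=...` and a single `cfg=cfg`, and the surrounding edits imply the Solver now reads output_dir, seed, and the TRAIN/EVAL settings from the config itself. A minimal sketch of the new calling convention, assuming a Hydra-managed `DictConfig` like the ones in these examples; the yaml name `example.yaml`, the data keys `x`/`u`, and the toy random dataset are illustrative assumptions, not taken from any file in this patch:

```py
import hydra
import numpy as np
from omegaconf import DictConfig

import ppsci


@hydra.main(version_base=None, config_path="./conf", config_name="example.yaml")
def train(cfg: DictConfig):
    # model and optimizer are built exactly as before the migration
    model = ppsci.arch.MLP(**cfg.MODEL)
    optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model)

    # a toy supervised constraint; no explicit "sampler" entry is given,
    # matching the default-BatchSampler blocks deleted throughout this patch
    sup_constraint = ppsci.constraint.SupervisedConstraint(
        {
            "dataset": {
                "name": "NamedArrayDataset",
                "input": {"x": np.random.rand(16, 1).astype("float32")},
                "label": {"u": np.random.rand(16, 1).astype("float32")},
            },
            "batch_size": 4,
        },
        ppsci.loss.MSELoss("mean"),
        name="sup_constraint",
    )
    constraint = {sup_constraint.name: sup_constraint}

    # output_dir, epochs, iters_per_epoch, save/eval/log frequencies, seed,
    # and checkpoint/pretrained paths no longer appear as individual
    # arguments; they are resolved from cfg
    solver = ppsci.solver.Solver(
        model,
        constraint,
        optimizer=optimizer,
        cfg=cfg,
    )
    solver.train()


if __name__ == "__main__":
    train()
```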
@@ -426,22 +410,13 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - seed=cfg.seed, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) # visualize prediction diff --git a/examples/tempoGAN/conf/tempogan.yaml b/examples/tempoGAN/conf/tempogan.yaml index c6787afbb..b329f3c8c 100644 --- a/examples/tempoGAN/conf/tempogan.yaml +++ b/examples/tempoGAN/conf/tempogan.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,17 +15,9 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - INFER.export_path - - mode - - output_dir - - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} @@ -28,9 +30,10 @@ output_dir: ${hydra:run.dir} log_freq: 20 DATASET_PATH: ./datasets/tempoGAN/2d_train.mat DATASET_PATH_VALID: ./datasets/tempoGAN/2d_valid.mat +use_amp: true +amp_level: O2 # set working condition -USE_AMP: true USE_SPATIALDISC: true USE_TEMPODISC: true WEIGHT_GEN: [5.0, 0.0, 1.0] # lambda_l1, lambda_l2, lambda_t @@ -86,7 +89,6 @@ TRAIN: gamma: 0.05 by_epoch: true eval_during_train: false - amp_level: O2 pretrained_model_path: null checkpoint_path: null diff --git a/examples/tempoGAN/tempoGAN.py b/examples/tempoGAN/tempoGAN.py index 99db83540..b84106064 100644 --- a/examples/tempoGAN/tempoGAN.py +++ b/examples/tempoGAN/tempoGAN.py @@ -35,10 +35,6 @@ def train(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - gen_funcs = func_module.GenFuncs( cfg.WEIGHT_GEN, (cfg.WEIGHT_GEN_LAYER if cfg.USE_SPATIALDISC else None) ) @@ -112,11 +108,6 @@ def train(cfg: DictConfig): ), }, "batch_size": cfg.TRAIN.batch_size.sup_constraint, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, }, ppsci.loss.FunctionalLoss(gen_funcs.loss_func_gen), { @@ -144,11 +135,6 @@ def train(cfg: DictConfig): ), }, "batch_size": int(cfg.TRAIN.batch_size.sup_constraint // 3), - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, }, ppsci.loss.FunctionalLoss(gen_funcs.loss_func_gen_tempo), { @@ -189,11 +175,6 @@ def train(cfg: DictConfig): ), }, "batch_size": cfg.TRAIN.batch_size.sup_constraint, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, }, ppsci.loss.FunctionalLoss(disc_funcs.loss_func), name="sup_constraint_disc", @@ -230,11 +211,6 @@ def train(cfg: DictConfig): ), }, "batch_size": int(cfg.TRAIN.batch_size.sup_constraint // 3), - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, }, 
ppsci.loss.FunctionalLoss(disc_funcs.loss_func_tempo), name="sup_constraint_disc_tempo", @@ -247,40 +223,22 @@ def train(cfg: DictConfig): solver_gen = ppsci.solver.Solver( model_list, constraint_gen, - cfg.output_dir, - optimizer_gen, - lr_scheduler_gen, - cfg.TRAIN.epochs_gen, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - use_amp=cfg.USE_AMP, - amp_level=cfg.TRAIN.amp_level, + optimizer=optimizer_gen, + cfg=cfg, ) if cfg.USE_SPATIALDISC: solver_disc = ppsci.solver.Solver( model_list, constraint_disc, - cfg.output_dir, - optimizer_disc, - lr_scheduler_disc, - cfg.TRAIN.epochs_disc, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - use_amp=cfg.USE_AMP, - amp_level=cfg.TRAIN.amp_level, + optimizer=optimizer_disc, + cfg=cfg, ) if cfg.USE_TEMPODISC: solver_disc_tempo = ppsci.solver.Solver( model_list, constraint_disc_tempo, - cfg.output_dir, - optimizer_disc_tempo, - lr_scheduler_disc_tempo, - cfg.TRAIN.epochs_disc_tempo, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - use_amp=cfg.USE_AMP, - amp_level=cfg.TRAIN.amp_level, + optimizer=optimizer_disc_tempo, + cfg=cfg, ) PRED_INTERVAL = 200 @@ -329,10 +287,6 @@ def evaluate(cfg: DictConfig): os.makedirs(osp.join(cfg.output_dir, "eval_outs"), exist_ok=True) - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - gen_funcs = func_module.GenFuncs(cfg.WEIGHT_GEN, None) # load dataset @@ -357,11 +311,6 @@ def evaluate(cfg: DictConfig): }, "label": {"density_high": dataset_valid["density_high"]}, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": 1, } sup_validator = ppsci.validate.SupervisedValidator( diff --git a/examples/topopt/conf/topopt.yaml b/examples/topopt/conf/topopt.yaml index 813ec8fbe..8642ec464 100644 --- a/examples/topopt/conf/topopt.yaml +++ b/examples/topopt/conf/topopt.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory @@ -21,6 +31,9 @@ hydra: - mode - vol_coeff - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} @@ -67,7 +80,13 @@ EVAL: # inference settings INFER: pretrained_model_name: null # a string, indicating which model you want to export. Support [Uniform, Poisson5, Poisson10, Poisson30]. 
- pretrained_model_path_dict: {'Uniform': 'https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/uniform_pretrained.pdparams', 'Poisson5': 'https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/poisson5_pretrained.pdparams', 'Poisson10': 'https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/poisson10_pretrained.pdparams', 'Poisson30': 'https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/poisson30_pretrained.pdparams'} + pretrained_model_path_dict: + { + "Uniform": "https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/uniform_pretrained.pdparams", + "Poisson5": "https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/poisson5_pretrained.pdparams", + "Poisson10": "https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/poisson10_pretrained.pdparams", + "Poisson30": "https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/poisson30_pretrained.pdparams", + } export_path: ./inference/topopt_${INFER.pretrained_model_name} pdmodel_path: ${INFER.export_path}.pdmodel pdiparams_path: ${INFER.export_path}.pdiparams diff --git a/examples/topopt/topopt.py b/examples/topopt/topopt.py index 3e855510a..cb579dd2e 100644 --- a/examples/topopt/topopt.py +++ b/examples/topopt/topopt.py @@ -29,11 +29,6 @@ def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # 4 training cases parameters LEARNING_RATE = cfg.TRAIN.learning_rate / (1 + cfg.TRAIN.epochs // 15) ITERS_PER_EPOCH = int(cfg.n_samples * cfg.train_test_ratio / cfg.TRAIN.batch_size) @@ -101,11 +96,9 @@ def train(cfg: DictConfig): model, constraint, OUTPUT_DIR, - optimizer, - epochs=cfg.TRAIN.epochs, + optimizer=optimizer, iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, + cfg=cfg, ) # train model @@ -114,11 +107,6 @@ def train(cfg: DictConfig): # evaluate 4 models def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # fixed iteration stop times for evaluation iterations_stop_times = range(5, 85, 5) model = TopOptNN(**cfg.MODEL) @@ -236,7 +224,6 @@ def evaluate_model( "drop_last": False, "shuffle": True, }, - "num_workers": 0, }, ppsci.loss.FunctionalLoss(loss_wrapper(cfg)), {"output": lambda out: out["output"]}, diff --git a/examples/yinglong1/conf/yinglong_12.yaml b/examples/yinglong1/conf/yinglong_12.yaml index c4b67c395..dc6140073 100644 --- a/examples/yinglong1/conf/yinglong_12.yaml +++ b/examples/yinglong1/conf/yinglong_12.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -6,16 +16,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/yinglong1/conf/yinglong_24.yaml 
b/examples/yinglong1/conf/yinglong_24.yaml index 8abaaf18f..7f187e933 100644 --- a/examples/yinglong1/conf/yinglong_24.yaml +++ b/examples/yinglong1/conf/yinglong_24.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -6,16 +16,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/test/utils/test_config.py b/test/utils/test_config.py index 844d1f449..9e3af8f46 100644 --- a/test/utils/test_config.py +++ b/test/utils/test_config.py @@ -31,7 +31,7 @@ def test_invalid_epochs(tmpdir, epochs, mode, seed): "epochs": epochs, }, } dir_ = os.path.dirname(__file__) config_abs_path = os.path.join(dir_, "test_config.yaml") with open(config_abs_path, "w") as f: @@ -45,6 +45,5 @@ assert exec_info.value.code == 2 -# 这部分通常不需要，除非你想直接从脚本运行测试 if __name__ == "__main__": pytest.main()
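Note that the test keeps `dir_ = os.path.dirname(__file__)` as context: the hunk below still uses `dir_` on the next line, so removing its definition would raise a NameError. The deletions of `set_random_seed` and `init_logger` throughout the patch are safe for the same structural reason in reverse: every yaml now registers `ppsci.utils.callbacks.InitCallback` under `hydra.callbacks`, which performs that bootstrap once before `train`/`evaluate` runs. As a rough equivalence, the callback replaces the boilerplate sketched below (taken verbatim from the removed lines; `manual_bootstrap` is a hypothetical name, not the callback's actual source):

```py
from os import path as osp

import ppsci
from ppsci.utils import logger


def manual_bootstrap(cfg):
    # what each example used to repeat by hand, now done by InitCallback:
    # seed all RNGs for reproducibility ...
    ppsci.utils.misc.set_random_seed(cfg.seed)
    # ... and route log output to <output_dir>/<mode>.log
    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
```

Because the callback validates the config at startup, a bad override such as a negative `TRAIN.epochs` aborts the run before training begins, which is exactly what `test_invalid_epochs` above exercises by asserting exit code 2.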