This view is limited to 50 files because it contains too many changes.  See the raw diff here.
Files changed (50) hide show
  1. .gitattributes +55 -1
  2. .gitignore +0 -210
  3. DATA_GUIDE.md +0 -825
  4. MODEL_GUIDE.md +0 -352
  5. README.md +0 -515
  6. configs/1dkdv.yaml +0 -5
  7. configs/1dkdv_ttt.yaml +0 -8
  8. configs/2ddf.yaml +0 -8
  9. configs/2ddf_ttt.yaml +0 -9
  10. configs/2dns.yaml +0 -5
  11. configs/2dns_ttt.yaml +0 -10
  12. configs/2drddu.yaml +0 -7
  13. configs/2drddu_ttt.yaml +0 -12
  14. configs/2drdk.yaml +0 -6
  15. configs/2drdk_ttt.yaml +0 -10
  16. configs/2dtf.yaml +0 -6
  17. configs/2dtf_ttt.yaml +0 -11
  18. configs/base.yaml +0 -19
  19. configs/callbacks/2ddf.yaml +0 -9
  20. configs/callbacks/base.yaml +0 -10
  21. configs/data/base.yaml +0 -112
  22. configs/lightning_module/base.yaml +0 -10
  23. configs/lightning_module/ttt.yaml +0 -8
  24. configs/logging/base.yaml +0 -4
  25. configs/loss/mse.yaml +0 -5
  26. configs/loss/relative.yaml +0 -5
  27. configs/lr_scheduler/cosine.yaml +0 -2
  28. configs/model/fno.yaml +0 -36
  29. configs/model/fno_50k.yaml +0 -9
  30. configs/model/fno_50mil.yaml +0 -9
  31. configs/model/resnet.yaml +0 -35
  32. configs/model/scot.yaml +0 -41
  33. configs/optimizer/adam.yaml +0 -2
  34. configs/system_params/1dkdv.yaml +0 -17
  35. configs/system_params/2ddf.yaml +0 -27
  36. configs/system_params/2dns.yaml +0 -16
  37. configs/system_params/2drddu.yaml +0 -9
  38. configs/system_params/2drdk.yaml +0 -18
  39. configs/system_params/2dtf.yaml +0 -16
  40. configs/system_params/base.yaml +0 -91
  41. configs/tailoring_optimizer/adam.yaml +0 -2
  42. configs/tailoring_optimizer/sgd.yaml +0 -2
  43. configs/trainer/trainer.yaml +0 -4
  44. configs/ttt_base.yaml +0 -14
  45. environment.yml +0 -158
  46. fluid_stats.py +0 -418
  47. huggingface_pdeinv_download.py +0 -60
  48. images/1dkdv.png +0 -3
  49. images/2d_navier_stokes_unforced_train_val_split.png +0 -3
  50. images/2ddf.png +0 -3
.gitattributes CHANGED
@@ -1,5 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  *.gif filter=lfs diff=lfs merge=lfs -text
2
- *.webp filter=lfs diff=lfs merge=lfs -text
3
  *.png filter=lfs diff=lfs merge=lfs -text
 
 
4
  *.jpg filter=lfs diff=lfs merge=lfs -text
5
  *.jpeg filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mds filter=lfs diff=lfs merge=lfs -text
13
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
+ *.model filter=lfs diff=lfs merge=lfs -text
15
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
16
+ *.npy filter=lfs diff=lfs merge=lfs -text
17
+ *.npz filter=lfs diff=lfs merge=lfs -text
18
+ *.onnx filter=lfs diff=lfs merge=lfs -text
19
+ *.ot filter=lfs diff=lfs merge=lfs -text
20
+ *.parquet filter=lfs diff=lfs merge=lfs -text
21
+ *.pb filter=lfs diff=lfs merge=lfs -text
22
+ *.pickle filter=lfs diff=lfs merge=lfs -text
23
+ *.pkl filter=lfs diff=lfs merge=lfs -text
24
+ *.pt filter=lfs diff=lfs merge=lfs -text
25
+ *.pth filter=lfs diff=lfs merge=lfs -text
26
+ *.rar filter=lfs diff=lfs merge=lfs -text
27
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
28
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
30
+ *.tar filter=lfs diff=lfs merge=lfs -text
31
+ *.tflite filter=lfs diff=lfs merge=lfs -text
32
+ *.tgz filter=lfs diff=lfs merge=lfs -text
33
+ *.wasm filter=lfs diff=lfs merge=lfs -text
34
+ *.xz filter=lfs diff=lfs merge=lfs -text
35
+ *.zip filter=lfs diff=lfs merge=lfs -text
36
+ *.zst filter=lfs diff=lfs merge=lfs -text
37
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
38
+ # Audio files - uncompressed
39
+ *.pcm filter=lfs diff=lfs merge=lfs -text
40
+ *.sam filter=lfs diff=lfs merge=lfs -text
41
+ *.raw filter=lfs diff=lfs merge=lfs -text
42
+ # Audio files - compressed
43
+ *.aac filter=lfs diff=lfs merge=lfs -text
44
+ *.flac filter=lfs diff=lfs merge=lfs -text
45
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
46
+ *.ogg filter=lfs diff=lfs merge=lfs -text
47
+ *.wav filter=lfs diff=lfs merge=lfs -text
48
+ # Image files - uncompressed
49
+ *.bmp filter=lfs diff=lfs merge=lfs -text
50
  *.gif filter=lfs diff=lfs merge=lfs -text
 
51
  *.png filter=lfs diff=lfs merge=lfs -text
52
+ *.tiff filter=lfs diff=lfs merge=lfs -text
53
+ # Image files - compressed
54
  *.jpg filter=lfs diff=lfs merge=lfs -text
55
  *.jpeg filter=lfs diff=lfs merge=lfs -text
56
+ *.webp filter=lfs diff=lfs merge=lfs -text
57
+ # Video files - compressed
58
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
59
+ *.webm filter=lfs diff=lfs merge=lfs -text
.gitignore DELETED
@@ -1,210 +0,0 @@
1
- # Ignore W&B
2
- wandb/**
3
-
4
- # Mac os files
5
- .DS_Store
6
-
7
- # Ignore .specstory directory
8
- .specstory/
9
-
10
- # Local data store
11
- **.npz
12
- **.json
13
-
14
- # Ignore model files
15
- **.pt
16
- **.pth
17
-
18
- # Ignore local scripts (and local images)
19
- local_scripts/**
20
- tests/test-images
21
- # **.png
22
- **.jpeg
23
- **.pdf
24
-
25
-
26
- #ignore runner scripts
27
- runner*
28
- slurm*
29
- # Ignore local directories
30
- notebooks/**
31
- local-scripts/**
32
- .vscode/**
33
-
34
- # Logging folders
35
- test-images/**
36
- logs/**
37
- wandb/**
38
- outputs/**
39
-
40
- # wandb artifacts containing model checkpoints
41
- artifacts/**
42
-
43
- # Byte-compiled / optimized / DLL files
44
- __pycache__/
45
- *.py[cod]
46
- *$py.class
47
-
48
- # C extensions
49
- *.so
50
-
51
- # Distribution / packaging
52
- .Python
53
- build/
54
- develop-eggs/
55
- dist/
56
- downloads/
57
- eggs/
58
- .eggs/
59
- lib/
60
- lib64/
61
- parts/
62
- sdist/
63
- var/
64
- wheels/
65
- share/python-wheels/
66
- *.egg-info/
67
- .installed.cfg
68
- *.egg
69
- MANIFEST
70
-
71
- # PyInstaller
72
- # Usually these files are written by a python script from a template
73
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
74
- *.manifest
75
- *.spec
76
-
77
- # Installer logs
78
- pip-log.txt
79
- pip-delete-this-directory.txt
80
-
81
- # Unit test / coverage reports
82
- htmlcov/
83
- .tox/
84
- .nox/
85
- .coverage
86
- .coverage.*
87
- .cache
88
- nosetests.xml
89
- coverage.xml
90
- *.cover
91
- *.py,cover
92
- .hypothesis/
93
- .pytest_cache/
94
- cover/
95
-
96
- # Translations
97
- *.mo
98
- *.pot
99
-
100
- # Django stuff:
101
- *.log
102
- local_settings.py
103
- db.sqlite3
104
- db.sqlite3-journal
105
-
106
- # Flask stuff:
107
- instance/
108
- .webassets-cache
109
-
110
- # Scrapy stuff:
111
- .scrapy
112
-
113
- # Sphinx documentation
114
- docs/_build/
115
-
116
- # PyBuilder
117
- .pybuilder/
118
- target/
119
-
120
- # Jupyter Notebook
121
- .ipynb_checkpoints
122
- temp.ipynb
123
-
124
- # Model checkpoints
125
- *ckpt
126
-
127
- # IPython
128
- profile_default/
129
- ipython_config.py
130
-
131
- # pyenv
132
- # For a library or package, you might want to ignore these files since the code is
133
- # intended to run in multiple environments; otherwise, check them in:
134
- # .python-version
135
-
136
- # pipenv
137
- # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
138
- # However, in case of collaboration, if having platform-specific dependencies or dependencies
139
- # having no cross-platform support, pipenv may install dependencies that don't work, or not
140
- # install all needed dependencies.
141
- #Pipfile.lock
142
-
143
- # poetry
144
- # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
145
- # This is especially recommended for binary packages to ensure reproducibility, and is more
146
- # commonly ignored for libraries.
147
- # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
148
- #poetry.lock
149
-
150
- # pdm
151
- # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
152
- #pdm.lock
153
- # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
154
- # in version control.
155
- # https://pdm.fming.dev/#use-with-ide
156
- .pdm.toml
157
-
158
- # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
159
- __pypackages__/
160
-
161
- # Celery stuff
162
- celerybeat-schedule
163
- celerybeat.pid
164
-
165
- # SageMath parsed files
166
- *.sage.py
167
-
168
- # Environments
169
- .env
170
- .venv
171
- env/
172
- venv/
173
- ENV/
174
- env.bak/
175
- venv.bak/
176
-
177
- # Spyder project settings
178
- .spyderproject
179
- .spyproject
180
-
181
- # Rope project settings
182
- .ropeproject
183
-
184
- # mkdocs documentation
185
- /site
186
-
187
- # mypy
188
- .mypy_cache/
189
- .dmypy.json
190
- dmypy.json
191
-
192
- # Pyre type checker
193
- .pyre/
194
-
195
- # pytype static type analyzer
196
- .pytype/
197
-
198
- # Cython debug symbols
199
- cython_debug/
200
-
201
- # PyCharm
202
- # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
203
- # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
204
- # and can be added to the global gitignore or merged into this file. For a more nuclear
205
- # option (not recommended) you can uncomment the following to ignore the entire idea folder.
206
- #.idea/
207
- # images/*.gif
208
- # images/*.png
209
- # images/*.gif
210
- # images/*.png
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
DATA_GUIDE.md DELETED
@@ -1,825 +0,0 @@
1
- # PDEInvBench Data Guide
2
- Data guide for the dataset accompanying PDEInvBench.
3
- <img src="images/pde_objectives_main_fig_1.png" alt="" width="400">
4
-
5
- ## Table of Contents
6
-
7
- 1. [Dataset Link](#1-dataset-link)
8
- 2. [Downloading Data](#2-downloading-data)
9
- 3. [Overview](#3-overview)
10
- - [3.1 Data Format](#31-data-format)
11
- - [3.2 Parameter Extraction from Filenames](#32-parameter-extraction-from-filenames)
12
- - [3.3 Working with High-Resolution Data](#33-working-with-high-resolution-data)
13
- - [3.4 Data Loading Parameters](#34-data-loading-parameters)
14
- - [3.5 Parameter Normalization](#35-parameter-normalization)
15
- 4. [Datasets](#4-datasets)
16
- - [4a. 2D Reaction Diffusion](#4a-2d-reaction-diffusion)
17
- - [4b. 2D Navier Stokes (Unforced)](#4b-2d-navier-stokes-unforced)
18
- - [4c. 2D Turbulent Flow (Forced Navier Stokes)](#4c-2d-turbulent-flow-forced-navier-stokes)
19
- - [4d. 1D Korteweg-De Vries](#4d-1d-korteweg-de-vries)
20
- - [4e. 2D Darcy Flow](#4e-2d-darcy-flow)
21
- 5. [Adding a New Dataset](#5-adding-a-new-dataset)
22
-
23
-
24
- ## 1. Dataset Link
25
-
26
- The dataset used in this project can be found here:
27
- https://huggingface.co/datasets/DabbyOWL/PDE_Inverse_Problem_Benchmarking/tree/main
28
-
29
- ## 2. Downloading Data
30
-
31
- We provide a python script: [`huggingface_pdeinv_download.py`](huggingface_pdeinv_download.py) to batch download our hugging-face data. We will update the readme of our hugging-face dataset and our github repo to reflect this addition. To run this:
32
-
33
- ```bash
34
- pip install huggingface_hub
35
- python3 huggingface_pdeinv_download.py [--dataset DATASET_NAME] [--split SPLIT] [--local-dir PATH]
36
- ```
37
-
38
- **Available datasets:** `darcy-flow-241`, `darcy-flow-421`, `korteweg-de-vries-1d`, `navier-stokes-forced-2d-2048`, `navier-stokes-forced-2d`, `navier-stokes-unforced-2d`, `reaction-diffusion-2d-du-512`, `reaction-diffusion-2d-du`, `reaction-diffusion-2d-k-512`, `reaction-diffusion-2d-k`
39
-
40
- **Available splits:** `*` (all), `train`, `validation`, `test`, `out_of_distribution`, `out_of_distribution_extreme`
41
-
42
-
43
- ## 3. Overview
44
-
45
- The PDEInvBench dataset contains five PDE systems spanning parabolic, hyperbolic, and elliptic classifications, designed for benchmarking inverse parameter estimation.
46
-
47
- ### Dataset Scale and Scope
48
-
49
- The dataset encompasses **over 1.2 million individual simulations** across five PDE systems, with varying spatial and temporal resolutions:
50
-
51
- - **2D Reaction Diffusion**: 28×28×27 = 21,168 parameter combinations × 5 trajectories = 105,840 simulations
52
- - **2D Navier Stokes**: 101 parameter values × 192 trajectories = 19,392 simulations
53
- - **2D Turbulent Flow**: 120 parameter values × 108 trajectories = 12,960 simulations
54
- - **1D Korteweg-De Vries**: 100 parameter values × 100 trajectories = 10,000 simulations
55
- - **2D Darcy Flow**: 2,048 unique coefficient fields
56
-
57
- ### Multi-Resolution Architecture
58
-
59
- The dataset provides multiple spatial resolutions for each system, enabling studies on resolution-dependent generalization:
60
-
61
- - **Low Resolution**: 64×64 (2D systems), 256 (1D KdV), 241×241 (Darcy Flow)
62
- - **Medium Resolution**: 128×128 (2D systems), 256×256 (Turbulent Flow)
63
- - **High Resolution**: 256×256, 512×512, 2048x2048 (2D systems), 421×421 (Darcy Flow)
64
-
65
- ### Physical and Mathematical Diversity
66
-
67
- **Parabolic Systems** (Time-dependent, diffusive):
68
- - **2D Reaction Diffusion**: Chemical pattern formation with Fitzhugh-Nagumo dynamics
69
- - **2D Navier Stokes**: Fluid flow without external forcing
70
- - **2D Turbulent Flow**: Forced fluid dynamics with Kolmogorov forcing
71
-
72
- **Hyperbolic Systems** (Wave propagation):
73
- - **1D Korteweg-De Vries**: Soliton dynamics in shallow water waves
74
-
75
- **Elliptic Systems** (Steady-state):
76
- - **2D Darcy Flow**: Groundwater flow through porous media
77
-
78
- ### Parameter Space Coverage
79
-
80
- The dataset systematically explores parameter spaces across different physical regimes:
81
-
82
- - **Reaction Diffusion**: k ∈ [0.005,0.1], Du ∈ [0.01,0.5], Dv ∈ [0.01,0.5] (Turing bifurcations)
83
- - **Navier Stokes**: ν ∈ [10⁻⁴,10⁻²] (Reynolds: 80-8000, laminar to transitional)
84
- - **Turbulent Flow**: ν ∈ [10⁻⁵,10⁻²] (fully developed turbulence)
85
- - **Korteweg-De Vries**: δ ∈ [0.8,5] (dispersion strength in shallow water)
86
- - **Darcy Flow**: Piecewise constant diffusion coefficients (porous media heterogeneity)
87
-
88
- ### Evaluation Framework
89
-
90
- The dataset implements a sophisticated three-tier evaluation system for comprehensive generalization testing:
91
-
92
- 1. **In-Distribution (ID)**: Parameters within training ranges for baseline performance
93
- 2. **Out-of-Distribution (Non-Extreme)**: Middle-range parameters excluded from training
94
- 3. **Out-of-Distribution (Extreme)**: Extremal parameter values for stress testing
95
-
96
- This framework enables systematic evaluation of model robustness across parameter space, critical for real-world deployment where models must generalize beyond training distributions.
97
-
98
-
99
- ### Data Organization and Accessibility
100
-
101
- The dataset is organized in a standardized HDF5 format with:
102
-
103
- - **Hierarchical Structure**: Train/validation/test splits with consistent naming conventions
104
- - **Parameter Encoding**: Filenames encode parameter values for easy parsing
105
- - **Multi-Channel Support**: 2D systems support multiple solution channels (velocity components, chemical species)
106
- - **Grid Information**: Complete spatial and temporal coordinate information
107
- - **Normalization Statistics**: Pre-computed parameter normalization for consistent preprocessing
108
-
109
- ### Key Features for Inverse Problem Benchmarking
110
-
111
- 1. **Multi-Physics Coverage**: Spans chemical, fluid, wave, and porous media physics
112
- 2. **Resolution Scalability**: Enables studies on resolution-dependent model behavior
113
- 3. **Parameter Diversity**: Systematic exploration of parameter spaces across physical regimes
114
- 4. **Generalization Testing**: Built-in evaluation framework for out-of-distribution performance
115
- 5. **Computational Efficiency**: Optimized data loading and preprocessing pipelines
116
- 6. **Reproducibility**: Complete documentation of generation parameters and solver configurations
117
-
118
- This comprehensive dataset provides researchers with a unified platform for developing and evaluating inverse problem solving methods across diverse scientific domains, enabling systematic comparison of approaches and identification of fundamental limitations in current methodologies.
119
-
120
- ### 3.1 Data Format
121
-
122
- All datasets are stored in HDF5 format with specific structure depending on the PDE system.
123
-
124
- #### Directory Structure
125
-
126
- Datasets should be organized in the following directory structure:
127
-
128
- ```
129
- /path/to/data/
130
- ├── train/
131
- │ ├── param_file_1.h5
132
- │ ├── param_file_2.h5
133
- │ └── ...
134
- ├── validation/
135
- │ ├── param_file_3.h5
136
- │ └── ...
137
- └── test/
138
- ├── param_file_4.h5
139
- └── ...
140
- ```
141
-
142
- ### 3.2 Parameter Extraction from Filenames
143
-
144
- Parameters are extracted from filenames using pattern matching. For example:
145
-
146
- - **2D Reaction Diffusion**: `Du=0.1_Dv=0.2_k=0.05.h5`
147
- - Du = 0.1, Dv = 0.2, k = 0.05
148
-
149
- - **2D Navier Stokes**: `83.0.h5`
150
- - Reynolds number = 83.0
151
-
152
- - **1D KdV**: `delta=3.5_ic=42.h5`
153
- - δ = 3.5
154
-
155
- ### 3.3 Working with High-Resolution Data
156
-
157
- For high-resolution datasets, we provide configurations for downsampling:
158
-
159
- | PDE System | Original Resolution | High-Resolution |
160
- |------------|:-------------------:|:---------------:|
161
- | 2D Reaction Diffusion | 128×128 | 512×512 |
162
- | 2D Navier Stokes | 64×64 | 256×256 |
163
- | 2D Turbulent Flow | 64x64 | 2048x2048 |
164
- | Darcy Flow | 241×241 | 421×421 |
165
-
166
- When working with high-resolution data, set the following parameters:
167
-
168
- ```bash
169
- high_resolution=True
170
- data.downsample_factor=4 # e.g., for 512×512 → 128×128
171
- data.batch_size=2 # Reduce batch size for GPU memory
172
- ```
173
-
174
- ### 3.4 Data Loading Parameters
175
-
176
- Key parameters for loading data:
177
-
178
- - `data.every_nth_window`: Controls sampling frequency of time windows
179
- - `data.frac_ics_per_param`: Fraction of initial conditions per parameter to use
180
- - `data.frac_param_combinations`: Fraction of parameter combinations to use
181
- - `data.train_window_end_percent`: Percentage of trajectory used for training
182
- - `data.test_window_start_percent`: Percentage where test window starts
183
-
184
- ### 3.5 Parameter Normalization
185
-
186
- Parameters are normalized using the following statistics, where the mean and standard deviation are computed using the span of the parameters in the dataset:
187
-
188
- ```python
189
- PARAM_NORMALIZATION_STATS = {
190
- PDE.ReactionDiffusion2D: {
191
- "k": (0.06391126306498819, 0.029533048151465856), # (mean, std)
192
- "Du": (0.3094992685910578, 0.13865605073673604), # (mean, std)
193
- "Dv": (0.259514500345804, 0.11541850276902947), # (mean, std)
194
- },
195
- PDE.NavierStokes2D: {"re": (1723.425, 1723.425)}, # (mean, std)
196
- PDE.TurbulentFlow2D: {"nu": (0.001372469573118451, 0.002146258280849241)},
197
- PDE.KortewegDeVries1D: {"delta": (2.899999997019768, 1.2246211546444339)},
198
- # Add more as needed
199
- }
200
- ```
201
-
202
- ## 4. Datasets
203
-
204
- This section provides detailed information about each PDE system in the dataset. Each subsection includes visualizations, descriptions, and technical specifications.
205
-
206
- ### 4a. 2D Reaction Diffusion
207
-
208
- <img src="images/2drd_u_channel.png" alt="2DRD-Activator" width="400">
209
- <img src="images/2drd_v_channel.png" alt="2DRD-Inhibitor" width="400">
210
-
211
- **Description:** The 2D Reaction-Diffusion system models chemical reactions with spatial diffusion using the Fitzhugh-Nagumo equations. This dataset contains two-channel solutions (activator u and inhibitor v) with parameters k (threshold for excitement), Du (activator diffusivity), and Dv (inhibitor diffusivity). The system exhibits complex pattern formation including spots, stripes, and labyrinthine structures, spanning from dissipative to Turing bifurcations.
212
-
213
- **Mathematical Formulation:**
214
- The activator u and inhibitor v coupled system follows:
215
-
216
- ```
217
- ∂tu = Du∂xxu + Du∂yyu + Ru
218
- ∂tv = Dv∂xxv + Dv∂yyv + Rv
219
- ```
220
-
221
- where Ru and Rv are defined by the Fitzhugh-Nagumo equations:
222
-
223
- ```
224
- Ru(u,v) = u - u³ - k - v
225
- Rv(u,v) = u - v
226
- ```
227
-
228
- **Parameters of Interest:**
229
- - **Du**: Activator diffusion coefficient
230
- - **Dv**: Inhibitor diffusion coefficient
231
- - **k**: Threshold for excitement
232
-
233
- **Data Characteristics:**
234
- - Partial Derivatives: 5
235
- - Time-dependent: Yes (parabolic)
236
- - Spatial Resolutions: 128×128, 512x512
237
- - Parameters: k ∈ [0.005,0.1], Du ∈ [0.01,0.5], Dv ∈ [0.01,0.5]
238
- - Temporal Resolution: 0.049/5 seconds
239
- - Parameter Values: k - 28, Du - 28, Dv - 27
240
- - Initial Conditions/Trajectories: 5
241
-
242
- **Evaluation Splits:**
243
- - **Test (ID)**: k ∈ [0.01,0.04] ∪ [0.08,0.09], Du ∈ [0.08,0.2] ∪ [0.4,0.49], Dv ∈ [0.08,0.2] ∪ [0.4,0.49]
244
- - **OOD (Non-Extreme)**: k ∈ [0.04,0.08], Du ∈ [0.2,0.4], Dv ∈ [0.2,0.4]
245
- - **OOD (Extreme)**: k ∈ [0.001,0.01] ∪ [0.09,0.1], Du ∈ [0.02,0.08] ∪ [0.49,0.5], Dv ∈ [0.02,0.08] ∪ [0.49,0.5]
246
-
247
- **Generation Parameters:**
248
- - **Solver**: Explicit Runge-Kutta method of order 5(4) (RK45)
249
- - **Error Tolerance**: Relative error tolerance of 10⁻⁶
250
- - **Spatial Discretization**: Finite Volume Method (FVM) with uniform 128×128 grid
251
- - **Domain**: [-1,1] × [-1,1] with cell size Δx = Δy = 0.015625
252
- - **Burn-in Period**: 1 simulation second
253
- - **Dataset Simulation Time**: [0,5] seconds, 101 time steps
254
- - **Nominal Time Step**: Δt ≈ 0.05 seconds (adaptive)
255
- - **Generation Time**: ≈ 1 week on CPU
256
-
257
- **File Structure:**
258
- ```
259
- filename: Du=0.1_Dv=0.2_k=0.05.h5
260
- ```
261
- Contents:
262
- - `0001/data`: Solution field [time, spatial_dim_1, spatial_dim_2, channels]
263
- - `0001/grid/x`: x-coordinate grid points
264
- - `0001/grid/y`: y-coordinate grid points
265
- - `0001/grid/t`: Time points
266
-
267
- ### 4b. 2D Navier Stokes (Unforced)
268
-
269
- <img src="images/2dns.png" alt="2DNS" width="400">
270
-
271
- **Description:** The 2D Navier-Stokes equations describe incompressible fluid flow without external forcing. This dataset contains velocity field solutions with varying Reynolds numbers, showcasing different flow regimes from laminar to transitional flows.
272
-
273
- **Mathematical Formulation:**
274
- We consider the vorticity form of the unforced Navier-Stokes equations:
275
-
276
- ```
277
- ∂w(t,x,y)/∂t + u(t,x,y)·∇w(t,x,y) = νΔw(t,x,y)
278
- ```
279
-
280
- for t ∈ [0,T] and (x,y) ∈ (0,1)², with auxiliary conditions:
281
- - w = ∇ × u
282
- - ∇ · u = 0
283
- - w(0,x,y) = w₀(x,y) (Boundary Conditions)
284
-
285
- **Parameters of Interest:**
286
- - **ν**: The physical parameter of interest, representing viscosity
287
-
288
- **Data Characteristics:**
289
- - Partial Derivatives: 3
290
- - Time-dependent: Yes (parabolic)
291
- - Spatial Resolutions: 64×64, 256x256
292
- - Parameters: ν ∈ [10⁻⁴,10⁻²] (Reynolds: 80-8000)
293
- - Temporal Resolution: 0.0468/3 seconds
294
- - Parameter Values: 101
295
- - Initial Conditions/Trajectories: 192
296
-
297
- The files contain spatial resolutions at 256x256, which are later downsampled using scipy decimate to 64x64
298
-
299
-
300
- **Evaluation Splits:**
301
- - **Test (ID)**: ν ∈ [10⁻³·⁸, 10⁻³·²] ∪ [10⁻²·⁸, 10⁻²·²]
302
- - **OOD (Non-Extreme)**: ν ∈ [10⁻³·², 10⁻²·⁸]
303
- - **OOD (Extreme)**: ν ∈ [10⁻⁴, 10⁻³·⁸] ∪ [10⁻²·², 10⁻²]
304
-
305
- **Generation Parameters:**
306
- - **Solver**: Pseudo-spectral solver with Crank-Nicolson time-stepping
307
- - **Implementation**: Written in Jax and GPU-accelerated
308
- - **Generation Time**: ≈ 3.5 GPU days (batch size=32)
309
- - **Burn-in Period**: 15 simulation seconds
310
- - **Saved Data**: Next 3 simulation seconds saved as dataset
311
- - **Initial Conditions**: Sampled according to Gaussian random field (length scale=0.8)
312
- - **Recording**: Solution recorded every 1 simulation second
313
- - **Simulation dt**: 1e-4
314
- - **Resolution**: 256×256
315
-
316
- **File Structure:**
317
- ```
318
- filename: 83.0.h5
319
- ```
320
- Contents:
321
- - `0001/data`: Solution field [time, spatial_dim_1, spatial_dim_2, channels]
322
- - `0001/grid/x`: x-coordinate grid points
323
- - `0001/grid/y`: y-coordinate grid points
324
- - `0001/grid/t`: Time points
325
-
326
- ### 4c. 2D Turbulent Flow (Forced Navier Stokes)
327
-
328
- <img src="images/2dtf.png" alt="2DTF" width="400">
329
-
330
- **Description:** The 2D Turbulent Flow dataset represents forced Navier-Stokes equations that generate fully developed turbulent flows. This dataset is particularly valuable for studying complex, multi-scale fluid dynamics and turbulent phenomena. All solutions exhibit turbulence across various Reynolds numbers.
331
-
332
- **Mathematical Formulation:**
333
- The forced Navier-Stokes equations with the Kolmogorov forcing function are similar to the unforced case with an additional forcing term:
334
-
335
- ```
336
- ∂ₜw + u·∇w = νΔw + f(k,y) - αw
337
- ```
338
-
339
- where the forcing function f(k,y) is defined as:
340
- ```
341
- f(k,y) = -kcos(ky)
342
- ```
343
-
344
- **Parameters of Interest:**
345
- - **ν**: Kinematic viscosity (similar to unforced NS)
346
- - **α**: Drag coefficient (fixed at α = 0.1)
347
- - **k**: Forced wavenumber (fixed at k = 2)
348
-
349
- The drag coefficient α primarily serves to keep the total energy of the system constant, acting as drag. The task is to predict ν.
350
-
351
- **Numerical Convergence**
352
- We examine convergence across all solutions we generated. However, at the spatial and temporal resolution used to produce this dataset, simulations with kinematic viscosity ν < 5e-4 may not be fully converged due to the fine scale turbulence dynamics. We include all generated trajectories in the training set to maximize coverage of the parameter space and to expose models to a broader range of flow regimes. Nevertheless, we recommend restricting quantitative evaluation and model selection to runs with ν >= 5e-4. For more details, please see our paper.
353
-
354
- **Data Characteristics:**
355
- - Partial Derivatives: 3
356
- - Time-dependent: Yes (parabolic)
357
- - Spatial Resolutions: 64x64, 2048x2048
358
- - Parameters: ν ∈ [10⁻⁵,10⁻²]
359
- - Temporal Resolution: 0.23/14.75 seconds
360
- - Parameter Values: 120
361
- - Initial Conditions/Trajectories: 108
362
-
363
- **Evaluation Splits:**
364
- - **Test (ID)**: ν ∈ [10⁻⁴·⁷, 10⁻³·⁸] ∪ [10⁻³·², 10⁻²·³]
365
- - **OOD (Non-Extreme)**: ν ∈ [10⁻³·⁸, 10⁻³·²]
366
- - **OOD (Extreme)**: ν ∈ [10⁻⁵, 10⁻⁴·⁷] ∪ [10⁻²·³, 10⁻²]
367
-
368
- **Generation Parameters:**
369
- - **Solver**: Pseudo-spectral solver with Crank-Nicolson time-stepping
370
- - **Implementation**: Written in Jax (leveraging Jax-CFD), similar to 2D NS
371
- - **Generation Time**: ≈ 4 GPU days (A100)
372
- - **Burn-in Period**: 40 simulation seconds
373
- - **Saved Data**: Next 15 simulation seconds saved as dataset
374
- - **Simulator Resolution**: 256×256
375
- - **Downsampling**: Downsamples to 64×64 before saving
376
- - **Temporal Resolution (Saved)**: ∂t = 0.25 simulation seconds
377
-
378
- **File Structure:**
379
- ```
380
- filename: nu=0.001.h5
381
- ```
382
- Contents:
383
- - `0001/data`: Solution field [time, spatial_dim_1, spatial_dim_2, channels]
384
- - `0001/grid/x`: x-coordinate grid points
385
- - `0001/grid/y`: y-coordinate grid points
386
- - `0001/grid/t`: Time points
387
-
388
- ### 4d. 1D Korteweg-De Vries
389
-
390
- <img src="images/1dkdv.png" alt="KdV" width="400">
391
-
392
- **Description:** The Korteweg-De Vries (KdV) equation is a nonlinear partial differential equation that describes shallow water waves and solitons. This 1D dataset contains soliton solutions with varying dispersion parameters, demonstrating wave propagation and interaction phenomena.
393
-
394
- **Mathematical Formulation:**
395
- KdV is a 1D PDE representing waves on a shallow-water surface. The governing equation follows the form:
396
-
397
- ```
398
- 0 = ∂ₜu + u·∂ₓu + δ²∂ₓₓₓu
399
- ```
400
-
401
- **Parameters of Interest:**
402
- - **δ**: The physical parameter representing the strength of the dispersive effect on the system
403
- - In shallow water wave theory, δ is a unit-less quantity roughly indicating the relative depth of the water
404
-
405
- **Data Characteristics:**
406
- - Partial Derivatives: 3
407
- - Time-dependent: Yes (hyperbolic)
408
- - Spatial Resolution: 256
409
- - Parameters: δ ∈ [0.8,5]
410
- - Temporal Resolution: 0.73/102 seconds
411
- - Parameter Values: 100
412
- - Initial Conditions/Trajectories: 100
413
-
414
- **Evaluation Splits:**
415
- - **Test (ID)**: δ ∈ [1.22, 2.48] ∪ [3.32, 4.58]
416
- - **OOD (Non-Extreme)**: δ ∈ [2.48, 3.32]
417
- - **OOD (Extreme)**: δ ∈ [0.8, 1.22] ∪ [4.58, 5]
418
-
419
- **Generation Parameters:**
420
- - **Domain**: Periodic domain [0,L]
421
- - **Spatial Discretization**: Pseudospectral method with Fourier basis (Nₓ = 256 grid points)
422
- - **Time Integration**: Implicit Runge-Kutta method (Radau IIA, order 5)
423
- - **Implementation**: SciPy's `solve_ivp` on CPU
424
- - **Generation Time**: ≈ 12 hours
425
- - **Burn-in Period**: 40 simulation seconds
426
-
427
- **Initial Conditions:**
428
- Initial conditions are sampled from a distribution over a truncated Fourier Series:
429
-
430
- ```
431
- u₀(x) = Σ_{k=1}^K A_k sin(2πl_k x/L + φ_k)
432
- ```
433
-
434
- where:
435
- - A_k, φ_k ~ U(0,1)
436
- - l_k ~ U(1,3)
437
-
438
- **File Structure:**
439
- ```
440
- filename: delta=3.5_ic=42.h5
441
- ```
442
- Contents:
443
- - `tensor`: Solution field with shape [time, spatial_dim]
444
- - `x-coordinate`: Spatial grid points
445
- - `t-coordinate`: Time points
446
-
447
- ### 4e. 2D Darcy Flow
448
-
449
- <img src="images/2ddf.png" alt="2DDF" width="400">
450
-
451
- **Description:** The 2D Darcy Flow dataset represents steady-state flow through porous media with piecewise constant diffusion coefficients. This time-independent system is commonly used in groundwater flow modeling and subsurface transport problems. All solutions converge to a non-trivial steady-state solution based on the diffusion coefficient field.
452
-
453
- **Mathematical Formulation:**
454
- The 2D steady-state Darcy flow equation on a unit box Ω = (0,1)² is a second-order linear elliptic PDE with Dirichlet boundary conditions:
455
-
456
- ```
457
- -∇·(a(x)∇u(x)) = f(x), for x ∈ Ω
458
- u(x) = 0, for x ∈ ∂Ω
459
- ```
460
-
461
- where:
462
- - a ∈ L∞((0,1)²;R⁺) is a piecewise constant diffusion coefficient
463
- - u(x) is the pressure field
464
- - f(x) = 1 is a fixed forcing function
465
-
466
- **Parameters of Interest:**
467
- - **a(x)**: Piecewise constant diffusion coefficient field (spatially varying parameter)
468
-
469
- **Data Characteristics:**
470
- - Partial Derivatives: 2
471
- - Time-dependent: No (elliptic)
472
- - Spatial Resolutions: 241×241, 421×421
473
- - Parameters: Piecewise constant diffusion coefficient a ∈ L∞((0,1)²;R⁺)
474
- - Temporal Resolution: N/A (steady-state)
475
- - Parameter Values: 2048
476
- - Initial Conditions/Trajectories: N/A
477
-
478
- **Evaluation Splits:**
479
-
480
- Unlike time-dependent systems with scalar parameters, Darcy Flow does not admit parameter splits based on numeric ranges. Instead, splits are defined using a derived statistic of the coefficient field.
481
-
482
- Let \( r(a) \) denote the fraction of grid points in the coefficient field \( a(x) \) that take the maximum value (12).
483
- This statistic is approximately normally distributed across coefficient fields.
484
-
485
- Splits are defined as:
486
-
487
- - **Test (ID):** Coefficient fields whose \( r(a) \) lies within the central mass of the distribution
488
- - **OOD (Non-Extreme):** Not applicable
489
- - **OOD (Extreme):** Coefficient fields whose \( r(a) \) lies in the tails beyond \( \pm 1.5\sigma \)
490
-
491
-
492
- **Generation Parameters:**
493
- - **Solver**: Second-order finite difference method
494
- - **Implementation**: Originally written in Matlab, runs on CPU
495
- - **Resolution**: 421×421 (original), with lower resolution dataset generated by downsampling
496
- - **Coefficient Field Sampling**: a(x) is sampled from μ = Γ(N(0, -Δ + 9I)⁻²)
497
- - **Gamma Mapping**: Element-wise map where a_i ~ N(0, -Δ + 9I)⁻² → {3,12}
498
- - a_i → 12 when a_i ≥ 0
499
- - a_i → 3 when a_i < 0
500
- - **Boundary Conditions**: Zero Neumann boundary conditions on the Laplacian over the coefficient field
501
-
502
- **File Structure:**
503
- ```
504
- filename: sample_1024.h5
505
- ```
506
- Contents:
507
- - `coeff`: Piecewise constant coefficient field
508
- - `sol`: Solution field
509
-
510
-
511
- ## 5. Adding a New Dataset
512
-
513
- The PDEInvBench framework is designed to be modular, allowing you to add new PDE systems. This section describes how to add a new dataset to the repository. For information about data format requirements, see [Section 4.1](#41-data-format).
514
-
515
- ### Table of Contents
516
- - [Step 1: Add PDE Type to Utils](#step-1-add-pde-type-to-utils)
517
- - [Step 2: Add PDE Attributes](#step-2-add-pde-attributes)
518
- - [Step 3: Add Parameter Normalization Stats](#step-3-add-parameter-normalization-stats)
519
- - [Step 4: Add Parameter Extraction Logic](#step-4-add-parameter-extraction-logic)
520
- - [Step 5: Create a Dataset Handler](#step-5-create-a-dataset-handler-if-needed)
521
- - [Step 6: Create a Data Configuration](#step-6-create-a-data-configuration)
522
- - [Step 7: Add Residual Functions](#step-7-add-residual-functions)
523
- - [Step 8: Create a Combined Configuration](#step-8-create-a-combined-configuration)
524
- - [Step 9: Generate and Prepare Data](#step-9-generate-and-prepare-data)
525
- - [Step 10: Run Experiments](#step-10-run-experiments)
526
- - [Data Format Requirements](#data-format-requirements)
527
-
528
- ### Step 1: Add PDE Type to Utils
529
-
530
- First, add your new PDE system to `pdeinvbench/utils/types.py`:
531
-
532
- ```python
533
- class PDE(enum.Enum):
534
- """
535
- Describes which PDE system currently being used.
536
- """
537
- # Existing PDEs...
538
- ReactionDiffusion1D = "Reaction Diffusion 1D"
539
- ReactionDiffusion2D = "Reaction Diffusion 2D"
540
- NavierStokes2D = "Navier Stokes 2D"
541
- # Add your new PDE
542
- YourNewPDE = "Your New PDE Description"
543
- ```
544
-
545
- ### Step 2: Add PDE Attributes
546
-
547
- Update the attribute dictionaries in `pdeinvbench/utils/types.py` with information about your new PDE:
548
-
549
- ```python
550
- # Number of partial derivatives
551
- PDE_PARTIALS = {
552
- # Existing PDEs...
553
- PDE.YourNewPDE: 3, # Number of partial derivatives needed
554
- }
555
-
556
- # Number of spatial dimensions
557
- PDE_NUM_SPATIAL = {
558
- # Existing PDEs...
559
- PDE.YourNewPDE: 2, # 1 for 1D PDEs, 2 for 2D PDEs
560
- }
561
-
562
- # Spatial size of the grid
563
- PDE_SPATIAL_SIZE = {
564
- # Existing PDEs...
565
- PDE.YourNewPDE: [128, 128], # Spatial dimensions of your dataset
566
- }
567
-
568
- # High-resolution spatial size (if applicable)
569
- HIGH_RESOLUTION_PDE_SPATIAL_SIZE = {
570
- # Existing PDEs...
571
- PDE.YourNewPDE: [512, 512], # High-res dimensions
572
- }
573
-
574
- # Number of parameters
575
- PDE_NUM_PARAMETERS = {
576
- # Existing PDEs...
577
- PDE.YourNewPDE: 2, # Number of parameters in your PDE
578
- }
579
-
580
- # Parameter values
581
- PDE_PARAM_VALUES = {
582
- # Existing PDEs...
583
- PDE.YourNewPDE: {
584
- "param1": [0.1, 0.2, 0.3], # List of possible values for param1
585
- "param2": [1.0, 2.0, 3.0], # List of possible values for param2
586
- },
587
- }
588
-
589
- # Number of data channels
590
- PDE_NUM_CHANNELS = {
591
- # Existing PDEs...
592
- PDE.YourNewPDE: 2, # Number of channels in your solution field
593
- }
594
-
595
- # Number of timesteps in the trajectory
596
- PDE_TRAJ_LEN = {
597
- # Existing PDEs...
598
- PDE.YourNewPDE: 100, # Number of timesteps in your trajectories
599
- }
600
- ```
601
-
602
- ### Step 3: Add Parameter Normalization Stats
603
-
604
- Update `pdeinvbench/data/utils.py` with normalization statistics for your PDE parameters:
605
-
606
- ```python
607
- PARAM_NORMALIZATION_STATS = {
608
- # Existing PDEs...
609
- PDE.YourNewPDE: {
610
- "param1": (0.2, 0.05), # (mean, std) for param1
611
- "param2": (2.0, 0.5), # (mean, std) for param2
612
- },
613
- }
614
- ```
615
-
616
- ### Step 4: Add Parameter Extraction Logic
617
-
618
- Add logic to extract parameters from your dataset files in `extract_params_from_path` function inside the dataset class:
619
-
620
- ```python
621
- def extract_params_from_path(path: str, pde: PDE) -> dict:
622
- # Existing code...
623
- elif pde == PDE.YourNewPDE:
624
- # Parse the filename to extract parameters
625
- name = os.path.basename(path)
626
- # Example: extract parameters from filename format "param1=X_param2=Y.h5"
627
- param1 = torch.Tensor([float(name.split("param1=")[1].split("_")[0])])
628
- param2 = torch.Tensor([float(name.split("param2=")[1].split(".")[0])])
629
- param_dict = {"param1": param1, "param2": param2}
630
- # Existing code...
631
- return param_dict
632
- ```
633
-
634
- ### Step 5: Create a Dataset Handler (if needed)
635
-
636
- If your PDE requires special handling beyond what `PDE_MultiParam` provides, create a new dataset class in `pdeinvbench/data/`:
637
-
638
- ```python
639
- # Example: pdeinvbench/data/your_new_pde_dataset.py
640
- import torch
641
- from torch.utils.data import Dataset
642
-
643
- class YourNewPDEDataset(Dataset):
644
- """
645
- Custom dataset class for your new PDE system.
646
- """
647
- def __init__(
648
- self,
649
- data_root: str,
650
- pde: PDE,
651
- n_past: int,
652
- n_future: int,
653
- mode: str,
654
- train: bool,
655
- # Other parameters...
656
- ):
657
- # Initialization code...
658
- pass
659
-
660
- def __len__(self):
661
- # Implementation...
662
- pass
663
-
664
- def __getitem__(self, index: int):
665
- # Implementation...
666
- pass
667
- ```
668
-
669
- Add your new dataset to `pdeinvbench/data/__init__.py`:
670
-
671
- ```python
672
- from .pde_multiparam import PDE_MultiParam
673
- from .your_new_pde_dataset import YourNewPDEDataset
674
-
675
- __all__ = ["PDE_MultiParam", "YourNewPDEDataset"]
676
- ```
677
-
678
- ```markdown
679
- ### Step 6: Create System Configuration
680
-
681
- Create `configs/system_params/your_new_pde.yaml`:
682
-
683
- ```yaml
684
- # configs/system_params/your_new_pde.yaml
685
- defaults:
686
- - base
687
-
688
- # ============ Data Parameters ============
689
- name: "your_new_pde_inverse"
690
- data_root: "/path/to/your/data"
691
- pde_name: "Your New PDE Description" # Must match PDE enum value
692
- num_channels: 2 # Number of solution channels (e.g., u and v)
693
- cutoff_first_n_frames: 0 # How many initial frames to skip
694
-
695
- # ============ Model Parameters ============
696
- downsampler_input_dim: 2 # 1 for 1D systems, 2 for 2D systems
697
- params_to_predict: ["param1", "param2"] # What parameters to predict
698
- normalize: True # Whether to normalize predicted parameters
699
- ```
700
-
701
- Then create the top-level config `configs/your_new_pde.yaml`:
702
-
703
- ```yaml
704
- # configs/your_new_pde.yaml
705
- name: your_new_pde
706
- defaults:
707
- - _self_
708
- - base
709
- - override system_params: your_new_pde
710
- ```
711
-
712
- The existing configs/data/base.yaml automatically references ${system_params.*} so data loading works out of the box. Run experiments with:
713
-
714
-
715
- ```yaml
716
- python train_inverse.py --config-name=your_new_pde
717
- python train_inverse.py --config-name=your_new_pde model=fno
718
- python train_inverse.py --config-name=your_new_pde model=resnet
719
- ```
720
-
721
- ### Step 7: Add Residual Functions
722
-
723
- Implement residual functions for your PDE in `pdeinvbench/losses/pde_residuals.py`:
724
-
725
- ```python
726
- def your_new_pde_residual(
727
- sol: torch.Tensor,
728
- params: Dict[str, torch.Tensor],
729
- spatial_grid: Tuple[torch.Tensor, ...],
730
- t: torch.Tensor,
731
- return_partials: bool = False,
732
- ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
733
- """
734
- Compute the residual for your new PDE.
735
-
736
- Args:
737
- sol: Solution field
738
- params: Dictionary of PDE parameters
739
- spatial_grid: Spatial grid coordinates
740
- t: Time coordinates
741
- return_partials: Whether to return partial derivatives
742
-
743
- Returns:
744
- Residual tensor or (residual, partials) if return_partials=True
745
- """
746
- # Implementation...
747
- pass
748
- ```
749
-
750
- Register your residual function in `get_pde_residual_function`:
751
-
752
- ```python
753
- def get_pde_residual_function(pde: PDE) -> Callable:
754
- """Return the appropriate residual function for the given PDE."""
755
- if pde == PDE.ReactionDiffusion2D:
756
- return reaction_diffusion_2d_residual
757
- # Add your PDE
758
- elif pde == PDE.YourNewPDE:
759
- return your_new_pde_residual
760
- # Other PDEs...
761
- else:
762
- raise ValueError(f"Unknown PDE type: {pde}")
763
- ```
764
-
765
- ### Step 8: Create a Combined Configuration
766
-
767
- Create a combined configuration that uses your dataset:
768
-
769
- ```yaml
770
- # configs/your_new_pde.yaml
771
- name: "your_new_pde"
772
- defaults:
773
- - _self_
774
- - base
775
- - override data: your_new_pde
776
- ```
777
-
778
- ### Step 9: Generate and Prepare Data
779
-
780
- Make sure your data is properly formatted and stored in the expected directory structure:
781
-
782
- ```
783
- /path/to/your/data/
784
- ├── train/
785
- │ ├── param1=0.1_param2=1.0.h5
786
- │ ├── param1=0.2_param2=2.0.h5
787
- │ └── ...
788
- ├── validation/
789
- │ ├── param1=0.15_param2=1.5.h5
790
- │ └── ...
791
- └── test/
792
- ├── param1=0.25_param2=2.5.h5
793
- └── ...
794
- ```
795
-
796
- Each HDF5 file should contain:
797
- - Solution trajectories
798
- - Grid information (x, y, t)
799
- - Any other metadata needed for your PDE
800
-
801
- ### Step 10: Run Experiments
802
-
803
- You can now run experiments with your new dataset:
804
-
805
- ```bash
806
- python train_inverse.py --config-name=your_new_pde
807
- ```
808
-
809
- ### Data Format Requirements
810
-
811
- The primary dataset class `PDE_MultiParam` expects data in HDF5 format with specific structure:
812
-
813
- - **1D PDEs**: Each HDF5 file contains a single trajectory with keys:
814
- - `tensor`: The solution field with shape `[time, spatial_dim]`
815
- - `x-coordinate`: Spatial grid points
816
- - `t-coordinate`: Time points
817
-
818
- - **2D PDEs**: Each HDF5 file contains multiple trajectories (one per IC):
819
- - `0001/data`: Solution field with shape `[time, spatial_dim_1, spatial_dim_2, channels]`
820
- - `0001/grid/x`: x-coordinates
821
- - `0001/grid/y`: y-coordinates
822
- - `0001/grid/t`: Time points
823
-
824
- - **File naming**: The filename should encode the PDE parameters, following the format expected by `extract_params_from_path`
825
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
MODEL_GUIDE.md DELETED
@@ -1,352 +0,0 @@
1
- # PDEInvBench
2
- ## Adding a New Model
3
-
4
- The PDEInvBench framework is designed to be modular, allowing you to easily add new model architectures. This section describes how to add a new encoder architecture to the repository.
5
-
6
- ## Table of Contents
7
- - [Model Architecture Components](#model-architecture-components)
8
- - [Adding a new model](#adding-a-new-model)
9
- - [Step 1: Create a New Encoder Class](#step-1-create-a-new-encoder-class)
10
- - [Step 2: Import and Register Your Model](#step-2-import-and-register-your-model)
11
- - [Step 3: Create a Configuration File](#step-3-create-a-configuration-file)
12
- - [Step 4: Run Experiments with Your Model](#step-4-run-experiments-with-your-model)
13
-
14
- ## Model Architecture Components
15
-
16
- The inverse model architecture in PDEInvBench consists of three main components:
17
-
18
-
19
- ```
20
- Input Solution Field → Encoder → Downsampler → Parameter Network → PDE Parameters
21
- ```
22
-
23
- 1. **Encoder**: Extracts features from the input solution field (e.g., FNO, ResNet, ScOT)
24
- 2. **Downsampler**: Reduces the spatial dimensions of the features (e.g., ConvDownsampler)
25
- 3. **Parameter Network**: Predicts PDE parameters from the downsampled features
26
-
27
-
28
- ## Adding a new model
29
-
30
- When creating a new model, you typically only need to modify one of these components while keeping the others the same.
31
-
32
- ### Step 1: Create a New Encoder Class
33
-
34
- First, create a new encoder class in `pdeinvbench/models/encoder.py`. Your new encoder should follow the interface of existing encoders like `FNOEncoder`, `ResnetEncoder`, or `SwinEncoder`:
35
-
36
- ```python
37
- import torch
38
- import torch.nn as nn
39
- from pdeinvbench.utils.types import PDE
40
- from pdeinvbench.models.encoder import resolve_number_input_channels
41
-
42
- class YourEncoder(nn.Module):
43
- """
44
- Your custom encoder for PDE inverse problems.
45
- """
46
-
47
- def __init__(
48
- self,
49
- n_modes: int, # Or equivalent parameter for your architecture
50
- n_layers: int,
51
- n_past: int,
52
- n_future: int,
53
- pde: PDE,
54
- data_channels: int,
55
- hidden_channels: int,
56
- use_partials: bool,
57
- mode: str,
58
- batch_size: int
59
- # Add any architecture-specific parameters
60
- ):
61
- super(YourEncoder, self).__init__()
62
-
63
- # Store essential parameters
64
- self.n_past = n_past
65
- self.n_future = n_future
66
- self.pde = pde
67
- self.data_channels = data_channels
68
- self.hidden_channels = hidden_channels
69
- self.use_partials = use_partials
70
- self.mode = mode
71
- self.batch_size = batch_size
72
-
73
-
74
- # Calculate input channels similar to existing encoders
75
- in_channels = resolve_number_input_channels(
76
- n_past=n_past,
77
- data_channels=data_channels,
78
- use_partials=use_partials,
79
- pde=pde,
80
- )
81
-
82
- # Define your model architecture
83
- # Example: Custom neural network layers
84
- self.encoder_layers = nn.ModuleList([
85
- # Your custom layers here
86
- nn.Conv2d(in_channels, hidden_channels, kernel_size=3, padding=1),
87
- nn.ReLU(),
88
- # Add more layers as needed
89
- ])
90
-
91
- # Output layer to match expected output dimensions
92
- self.output_layer = nn.Conv2d(hidden_channels, hidden_channels, kernel_size=1)
93
-
94
- def forward(self, x, **kwargs):
95
- """
96
- Forward pass of your encoder.
97
-
98
- Args:
99
- x: Input tensor of shape [batch, channels, height, width]
100
- **kwargs: Additional arguments (may include 't' for time-dependent models)
101
-
102
- Returns:
103
- Output tensor of shape [batch, hidden_channels, height, width]
104
- """
105
- # Implement your forward pass
106
- for layer in self.encoder_layers:
107
- x = layer(x)
108
-
109
- x = self.output_layer(x)
110
- return x
111
- ```
112
-
113
- #### Creating Custom Downsamplers
114
-
115
- If you need a custom downsampler, create it in `pdeinvbench/models/downsampler.py`:
116
-
117
- ```python
118
- import torch
119
- import torch.nn as nn
120
-
121
- class YourDownsampler(nn.Module):
122
- """
123
- Your custom downsampler for reducing spatial dimensions.
124
- """
125
-
126
- def __init__(
127
- self,
128
- input_dimension: int,
129
- n_layers: int,
130
- in_channels: int,
131
- out_channels: int,
132
- kernel_size: int,
133
- stride: int,
134
- padding: int,
135
- dropout: float,
136
- ):
137
- super(YourDownsampler, self).__init__()
138
-
139
- # Define your downsampling layers
140
- self.layers = nn.ModuleList([
141
- # Your custom downsampling layers here
142
- nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
143
- nn.ReLU(),
144
- nn.Dropout(dropout),
145
- ])
146
-
147
- def forward(self, x):
148
- """
149
- Forward pass of your downsampler.
150
-
151
- Args:
152
- x: Input tensor of shape [batch, channels, height, width]
153
-
154
- Returns:
155
- Downsampled tensor
156
- """
157
- for layer in self.layers:
158
- x = layer(x)
159
- return x
160
- ```
161
-
162
- #### Creating Custom MLPs
163
-
164
- If you need a custom MLP, create it in `pdeinvbench/models/mlp.py`:
165
-
166
- ```python
167
- import torch
168
- import torch.nn as nn
169
-
170
- class YourMLP(nn.Module):
171
- """
172
- Your custom MLP for parameter prediction.
173
- """
174
-
175
- def __init__(
176
- self,
177
- in_dim: int,
178
- hidden_size: int,
179
- dropout: float,
180
- out_dim: int,
181
- num_layers: int,
182
- activation: str,
183
- ):
184
- super(YourMLP, self).__init__()
185
-
186
- # Define your MLP layers
187
- layers = []
188
- current_dim = in_dim
189
-
190
- for i in range(num_layers):
191
- layers.append(nn.Linear(current_dim, hidden_size))
192
- layers.append(nn.ReLU() if activation == "relu" else nn.Tanh())
193
- layers.append(nn.Dropout(dropout))
194
- current_dim = hidden_size
195
-
196
- layers.append(nn.Linear(current_dim, out_dim))
197
- self.layers = nn.Sequential(*layers)
198
-
199
- def forward(self, x):
200
- """
201
- Forward pass of your MLP.
202
-
203
- Args:
204
- x: Input tensor of shape [batch, features]
205
-
206
- Returns:
207
- Output tensor of shape [batch, out_dim]
208
- """
209
- return self.layers(x)
210
- ```
211
-
212
- ### Step 2: Import and Register Your Model
213
-
214
- Make sure your encoder is imported in `pdeinvbench/models/__init__.py`:
215
-
216
- ```python
217
- from .encoder import FNOEncoder, ResnetEncoder, ScOTEncoder, YourEncoder
218
- ```
219
-
220
- This makes your encoder available for use in configuration files.
221
-
222
- ### Step 3: Create a Configuration File
223
-
224
- The configuration system has three levels:
225
-
226
- #### 3.1: Create Model Architecture Config
227
-
228
- Create `configs/model/yourmodel.yaml`:
229
-
230
- ```yaml
231
- # configs/model/yourmodel.yaml
232
- name: "${system_params.name}_yourmodel"
233
- dropout: ${system_params.yourmodel_dropout}
234
- predict_variance: False
235
- hidden_channels: ${system_params.yourmodel_hidden_channels}
236
- encoder_layers: ${system_params.yourmodel_encoder_layers}
237
- downsampler_layers: ${system_params.yourmodel_downsampler_layers}
238
- mlp_layers: ${system_params.yourmodel_mlp_layers}
239
-
240
- model_config:
241
- _target_: pdeinvbench.models.inverse_model.InverseModel
242
- paramnet:
243
- _target_: pdeinvbench.models.inverse_model.ParameterNet
244
- pde: ${data.pde}
245
- normalize: ${system_params.normalize}
246
- logspace: ${system_params.logspace}
247
- params_to_predict: ${system_params.params_to_predict}
248
- predict_variance: ${model.predict_variance}
249
- mlp_type: ${system_params.mlp_type}
250
- encoder:
251
- _target_: pdeinvbench.models.encoder.YourEncoder
252
- n_modes: ${system_params.yourmodel_n_modes}
253
- n_past: ${n_past}
254
- n_future: ${n_future}
255
- n_layers: ${model.encoder_layers}
256
- data_channels: ${data.num_channels}
257
- hidden_channels: ${model.hidden_channels}
258
- use_partials: True
259
- pde: ${data.pde}
260
- mode: ${mode}
261
- batch_size: ${data.batch_size}
262
- use_cn: false
263
- task: inverse
264
- downsampler: ${system_params.yourmodel_downsampler}
265
- mlp_hidden_size: ${model.hidden_channels}
266
- mlp_layers: ${model.mlp_layers}
267
- mlp_activation: "relu"
268
- mlp_dropout: ${model.dropout}
269
- downsample_factor: ${data.downsample_factor}
270
- ```
271
-
272
- #### 3.2: Add Defaults to `configs/system_params/base.yaml`
273
-
274
- Add architecture defaults that work across all PDE systems:
275
-
276
- ```yaml
277
- # configs/system_params/base.yaml
278
-
279
- # ============ YourModel Architecture ============
280
- yourmodel_hidden_channels: 64
281
- yourmodel_encoder_layers: 4
282
- yourmodel_downsampler_layers: 4
283
- yourmodel_dropout: 0
284
- yourmodel_mlp_layers: 1
285
- yourmodel_n_modes: 16
286
-
287
- yourmodel_downsampler:
288
- _target_: pdeinvbench.models.downsampler.ConvDownsampler
289
- input_dimension: ${system_params.downsampler_input_dim}
290
- n_layers: ${model.downsampler_layers}
291
- in_channels: ${model.hidden_channels}
292
- out_channels: ${model.hidden_channels}
293
- kernel_size: 3
294
- stride: 1
295
- padding: 2
296
- dropout: ${model.dropout}
297
- ```
298
-
299
- #### 3.3: (Optional) Add System-Specific Overrides
300
-
301
- Override defaults for specific systems in `configs/system_params/{system}.yaml`:
302
-
303
- ```yaml
304
- # configs/system_params/2dtf.yaml
305
- defaults:
306
- - base
307
-
308
- # ... existing system config ...
309
-
310
- # Override architecture for this system
311
- yourmodel_hidden_channels: 128 # Needs larger model
312
- yourmodel_encoder_layers: 6
313
- ```
314
-
315
- **That's it!** Your model now works with all PDE systems:
316
- ```bash
317
- python train_inverse.py --config-name=1dkdv model=yourmodel
318
- python train_inverse.py --config-name=2dtf model=yourmodel
319
- ```
320
-
321
-
322
- #### Important Notes
323
-
324
- - **System-specific parameters** (like `params_to_predict`, `normalize`, `downsampler_input_dim`) go in `configs/system_params/{system}.yaml`
325
- - **Architecture defaults** go in `configs/system_params/base.yaml`
326
- - **Model structure** goes in `configs/model/{architecture}.yaml`
327
- - For special cases like Darcy Flow, override the downsampler in the system_params file:
328
- ```yaml
329
- # configs/system_params/2ddf.yaml
330
- yourmodel_downsampler:
331
- _target_: pdeinvbench.models.downsampler.IdentityMap
332
- ```
333
-
334
- ### Step 4: Run Experiments with Your Model
335
-
336
- You can now run experiments with your custom model on **any** PDE system:
337
-
338
- ```bash
339
- # Use your model with different PDE systems
340
- python train_inverse.py --config-name=1dkdv model=yourmodel
341
- python train_inverse.py --config-name=2dtf model=yourmodel
342
- python train_inverse.py --config-name=2dns model=yourmodel
343
-
344
- # Use model variants if you created them
345
- python train_inverse.py --config-name=2drdk model=yourmodel_large
346
-
347
- # Override parameters from command line
348
- python train_inverse.py --config-name=2dtf model=yourmodel model.hidden_channels=96
349
-
350
- # Combine multiple overrides
351
- python train_inverse.py --config-name=2ddf model=yourmodel data.batch_size=16 model.encoder_layers=6
352
- ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md DELETED
@@ -1,515 +0,0 @@
1
- # PDEInvBench
2
-
3
- A one-stop shop repository for the benchmarking Neural Operators on inverse problems in partial differential equations.
4
-
5
- <img src="images/pde_objectives_main_fig_1.png" alt="" width="400">
6
-
7
- ## Overview
8
-
9
- Inverse problems in partial differential equations (PDEs) involve recovering unknown physical parameters of a system—such as viscosity, diffusivity, or reaction coefficients—from observed spatiotemporal solution fields. Formally, given a PDE
10
-
11
- $$[F_{\phi}(u(x,t)) = 0]$$
12
-
13
- where *u(x,t)* is the solution field and *φ* represents physical parameters, the **forward problem** maps *φ ↦ u*, while the **inverse problem** seeks the reverse mapping *u ↦ φ*.
14
-
15
- Inverse problems are inherently ill-posed and highly sensitive to noise, making them a challenging yet foundational task in scientific computing and engineering. They arise in diverse applications such as geophysical exploration, fluid mechanics, biomedical imaging, and materials design—where estimating hidden parameters from observed dynamics is essential.
16
-
17
- **PDEInvBench** provides a comprehensive benchmark for inverse problems in partial differential equations (PDEs). The codebase supports multiple PDE systems, training strategies, and neural network architectures.
18
-
19
- ## DATASET LINK:
20
- The datasets used in this project can be found here:
21
- https://huggingface.co/datasets/DabbyOWL/PDE_Inverse_Problem_Benchmarking/tree/main
22
-
23
-
24
- ## Table of Contents
25
- 1. [Overview](#overview)
26
- 2. [Supported Systems](#supported-systems)
27
- 3. [Supported Inverse Methods](#supported-inverse-methods)
28
- 4. [Models Implemented](#models-implemented)
29
- 5. [Directory Structure](#directory-structure)
30
- 6. [Environment Setup](#environment-setup)
31
- 7. [Downloading Data](#downloading-data)
32
- 8. [Running Experiments](#running-experiments)
33
- - [How Configs Work](#how-configs-work)
34
- - [Basic Commands](#basic-commands)
35
- - [Common Overrides](#common-overrides)
36
- - [Multi-GPU and Distributed Training](#multi-gpu-and-distributed-training)
37
- - [Experiment Patterns Along Core Design Axes](#-experiment-patterns-along-core-design-axes)
38
- - [Training/Optimization Strategies](#1️⃣-trainingoptimization-strategies)
39
- - [Problem Representation and Inductive Bias](#2️⃣-problem-representation-and-inductive-bias)
40
- - [Scaling Experiments](#3️⃣-scaling-experiments)
41
-
42
- 9. [Testing](#Testing)
43
- 10. [Shape Checking](#Shape-Checking)
44
- 11. [Adding a New Model](#adding-a-new-model)
45
- 12. [Adding a New Dataset](#adding-a-new-dataset)
46
-
47
- ## Supported Systems
48
-
49
- - **[1D Korteweg–De Vries (KdV) Equation](DATA_GUIDE.md#4d-1d-korteweg-de-vries)**
50
- - **[2D Reaction Diffusion](DATA_GUIDE.md#4a-2d-reaction-diffusion)**
51
- - **[2D Unforced Navier Stokes](DATA_GUIDE.md#4b-2d-navier-stokes-unforced)**
52
- - **[2D Forced Navier Stokes](DATA_GUIDE.md#4c-2d-turbulent-flow-forced-navier-stokes)**
53
- - **[2D Darcy Flow](DATA_GUIDE.md#4e-2d-darcy-flow)**
54
-
55
- For detailed technical information on each PDE system — including governing equations, parameter ranges, and dataset download instructions — refer to the [Data Guide](DATA_GUIDE.md).
56
-
57
- ## Supported Inverse Methods
58
-
59
- - **Fully data-driven**
60
- - **PDE Residual Loss**
61
- - **Test-Time Tailoring (TTT)**
62
-
63
- ## Models Implemented
64
-
65
- - **[FNO (Fourier Neural Operator)](https://arxiv.org/pdf/2010.08895)**
66
- - **[scOT (scalable Operator Transformer)](https://proceedings.neurips.cc/paper_files/paper/2024/file/84e1b1ec17bb11c57234e96433022a9a-Paper-Conference.pdf)**
67
- - **[ResNet](https://arxiv.org/pdf/1512.03385)**
68
-
69
- For detailed technical information on the model architecture, refer to the [Model Guide](MODEL_GUIDE.md).
70
-
71
-
72
- ## Directory Structure
73
-
74
- ```
75
- PDEInvBench
76
- ├── configs/ # Inverse problem Hydra configuration files
77
- │ ├── callbacks/ # Training callbacks (checkpointing, logging)
78
- │ ├── data/ # Dataset and data loading configurations
79
- │ ├── lightning_module/ # PyTorch Lightning module configurations
80
- │ ├── logging/ # Weights & Biases logging configurations
81
- │ ├── loss/ # Loss function configurations
82
- │ ├── lr_scheduler/ # Learning rate scheduler configurations
83
- │ ├── model/ # Neural network model configurations
84
- │ ├── optimizer/ # Optimizer configurations
85
- | ├── system_params # PDE-specific model and experiment parameters
86
- │ ├── tailoring_optimizer/ # Test-time tailoring optimizer configs
87
- │ └── trainer/ # PyTorch Lightning trainer configurations
88
- ├── scripts/ # Utility and data processing scripts
89
- │ ├── darcy-flow-scripts/ # Darcy flow specific data processing
90
- │ ├── parameter-perturb/ # Parameter perturbation utilities
91
- │ ├── reaction-diffusion-scripts/ # Reaction-diffusion data processing
92
- │ ├── data_splitter.py # Splits datasets into train/validation sets
93
- │ └── process_navier_stokes.py # Processes raw Navier-Stokes data
94
- ├── pdeinvbench/ # Main package source code
95
- │ ├── data/ # Data loading and preprocessing modules
96
- │ ├── lightning_modules/ # PyTorch Lightning training modules
97
- │ ├── losses/ # Loss function implementations
98
- │ ├── models/ # Neural network model implementations
99
- │ │ ├── __init__.py # Package initialization
100
- │ │ ├── conv_head.py # Convolutional head for parameter prediction
101
- │ │ ├── downsampler.py # Spatial downsampling layers
102
- │ │ ├── encoder.py # FNO and other encoder architectures
103
- │ │ ├── inverse_model.py # Main inverse problem model
104
- │ │ └── mlp.py # Multi-layer perceptron components
105
- │ └── utils/ # Utility functions and type definitions
106
- │ ├── __init__.py # Package initialization
107
- │ ├── config_utils.py # Hydra configuration utilities
108
- │ ├── types.py # Type definitions and PDE system constants
109
- │ └── ... # Additional utility modules
110
- └── train_inverse.py # Main training script for inverse problems
111
- ```
112
-
113
- ## Environment Setup
114
-
115
- This project requires **Python 3.11** with PyTorch 2.7, PyTorch Lightning, and several scientific computing libraries.
116
-
117
- ### Quick Setup (Recommended)
118
-
119
- Using the provided `environment.yml`:
120
-
121
- ```bash
122
- # Create environment (use micromamba or conda)
123
- conda env create -f environment.yml
124
- conda activate inv-env-tmp
125
-
126
- # Install the package in editable mode
127
- pip install -e .
128
- ```
129
-
130
- ### Manual Setup
131
-
132
- Alternatively, use the `build_env.sh` script:
133
-
134
- ```bash
135
- chmod +x build_env.sh
136
- ./build_env.sh
137
- ```
138
-
139
- ### Key Dependencies
140
-
141
- - **Deep Learning**: PyTorch 2.7, PyTorch Lightning 2.5
142
- - **Neural Operators**: neuraloperator 0.3.0, scOT (Poseidon fork)
143
- - **Scientific Computing**: scipy, numpy, h5py, torch-harmonics
144
- - **Configuration**: Hydra 1.3, OmegaConf 2.3
145
- - **Logging**: Weights & Biases (wandb)
146
- - **Type Checking**: jaxtyping 0.3.2, typeguard 2.13.3
147
-
148
- **Note**: The scOT architecture requires a custom fork installed from GitHub (automatically handled in setup scripts).
149
-
150
- ### Verify Installation
151
-
152
- ```bash
153
- python -c "import torch; import lightning; import pdeinvbench; print('Setup successful!')"
154
- ```
155
-
156
- ## Downloading Data
157
-
158
- We provide datasets on [HuggingFace](https://huggingface.co/datasets/DabbyOWL/PDE_Inverse_Problem_Benchmarking/tree/main) with a convenient download script. Use `huggingface_pdeinv_download.py` to batch download specific datasets and splits:
159
-
160
- ```bash
161
- pip install huggingface_hub
162
- python3 huggingface_pdeinv_download.py --dataset darcy-flow-241 --split train --local-dir ./data
163
- ```
164
-
165
- Available datasets include `darcy-flow-241`, `korteweg-de-vries-1d`, `navier-stokes-forced-2d`, `reaction-diffusion-2d-du`, and more. For complete dataset documentation, parameter ranges, and detailed download instructions, see the [Data Guide](DATA_GUIDE.md#2-downloading-data).
166
-
167
- ## Running Experiments
168
-
169
- We use `hydra` to manage experiment configurations. The repository supports all combinations of:
170
- - **PDE Systems**: `1dkdv`, `2drd`, `2dns`, `2dtf`, `2ddf`
171
- - **Problem Representation**:
172
- - **Derivative conditioning**
173
- - **Temporal conditioning**: conditioning on 2, 5,10,15,20,25
174
- - **Model architectures**: FNO, ResNet, scOT (scalable Operator Transformer)
175
- - **Training / Optimization strategies**:
176
- - **Fully data-driven supervision** — standard supervised training using paired parameter–solution data
177
- - **Physics-informed (residual) training** — includes a PDE residual loss term for self-supervised regularization
178
- - **Test-Time Tailoring (TTT)** — post-training fine-tuning using the PDE residual at inference time to adapt to new parameter regimes
179
- - **Scaling**:
180
- - **Model Scaling**: 500k parameters, 5 million parameters, 50 million parameters
181
- - **Data scaling**: parameter, initial condition, temporal horizon
182
- - **Resolution scaling**: 64×64, 128×128, 256×256, 512×512
183
-
184
- ### How Configs Work
185
-
186
- #### Base Configs
187
-
188
- Base configs are located in `configs` and provide starting points for experiments:
189
-
190
- - Top-level configs (e.g., `1dkdv.yaml`, `2drd.yaml`) combine specific options for datasets, models, and training settings
191
- - Individual component configs are in subdirectories (e.g., `configs/data/`, `configs/model/`)
192
-
193
- #### Hydra Override Mechanism
194
-
195
- Hydra allows you to override any configuration parameter via command line:
196
-
197
- 1. **Simple parameter override**: `parameter=value`
198
- 2. **Nested parameter override**: `group.parameter=value`
199
- 3. **Adding new parameters**: `+new_parameter=value`
200
-
201
- All overrides are automatically appended to the W&B experiment name for easy tracking.
202
-
203
- ### Basic Commands
204
-
205
- ```bash
206
- # Run with a predefined config
207
- python3 train_inverse.py --config-name={pde_system}
208
-
209
- # Run in test mode (evaluation only)
210
- python3 train_inverse.py --config-name={pde_system} +test_run=true
211
-
212
- # Load a model from W&B
213
- python3 train_inverse.py --config-name={pde_system} +inverse_model_wandb_run={project_id}/{project_name}/model-{model_id}:{version}
214
- ```
215
- pde_system: `1dkdv`, `2dtf`, `2dns`, `2drdk`, `2drddu`, `2ddf`
216
- ### Common Overrides
217
-
218
- #### Data-related Overrides
219
- ```bash
220
- # Specify data root directory
221
- data.data_root=/path/to/data
222
-
223
- # Control time window sampling
224
- data.every_nth_window=10
225
-
226
- # Control fraction of data used
227
- data.frac_ics_per_param=0.5
228
- data.frac_param_combinations=0.5
229
-
230
- # Control train/test temporal split
231
- data.train_window_end_percent=0.5
232
- data.test_window_start_percent=0.76
233
-
234
- # High-resolution data processing
235
- high_resolution=True
236
- data.downsample_factor=4 # Downsample from 512x512 to 128x128
237
- data.downsample_factor=2 # Downsample from 256x256 to 128x128
238
- data.batch_size=2 # Reduce batch size for higher resolution data
239
- ```
240
-
241
- #### Model-related Overrides
242
- ```bash
243
- # Choose a model
244
- model=fno
245
- model=scot
246
- model=resnet
247
- model=fno_50mil # Higher capacity model
248
- model=fno_500k # Lower capacity model
249
-
250
- # Configure model parameters
251
- model.model_config.paramnet.encoder.use_partials=False
252
-
253
- # Specify which parameters to predict (e.g., for ablation studies)
254
- model.paramnet.params_to_predict=['Du']
255
- model.paramnet.params_to_predict=['Dv']
256
- ```
257
-
258
- #### Training Overrides
259
- ```bash
260
- # Control distributed training
261
- +trainer.num_nodes=2
262
-
263
- # Set random seed
264
- seed=0
265
-
266
- # Load a pre-trained model
267
- +inverse_model_wandb_run={project_id}/{project_name}/model-{model_id}:{version}
268
-
269
- # Enable test-only mode (no training)
270
- +test_run=true
271
- ```
272
-
273
- #### Loss-related Overrides
274
- ```bash
275
- # Adjust loss weights
276
- loss.inverse_param_loss_weight=0
277
- loss.inverse_residual_loss_weight=1
278
- ```
279
-
280
- #### Logging Overrides
281
- ```bash
282
- # Set W&B project and directory
283
- logging.project=my_project
284
- logging.save_dir=/path/to/wandb/cache
285
- ```
286
-
287
- ### Multi-GPU and Distributed Training
288
-
289
- ```bash
290
- # Single GPU
291
- CUDA_VISIBLE_DEVICES=0 python3 train_inverse.py --config-name={pde_system}
292
-
293
- # Multi-node with SLURM
294
- srun python3 train_inverse.py --config-name={pde_system} +trainer.num_nodes={num_nodes}
295
- # num_nodes = 1, 2, 4, etc.
296
- ```
297
-
298
- ### 🧪 Experiment Patterns Along Core Design Axes
299
-
300
- This section provides ready-to-run experiment templates organized by the core research dimensions of the benchmark. Each pattern includes concrete commands and parameter sweep recommendations.
301
-
302
- ---
303
-
304
- #### 1️⃣ Training/Optimization Strategies
305
-
306
- Experiments exploring different supervision and optimization approaches for inverse problems.
307
-
308
- ##### 1.1 Fully Data-Driven vs Physics-Informed Training
309
-
310
- ```bash
311
- # Fully data-driven (no residual loss)
312
- python3 train_inverse.py --config-name={pde_system} \
313
- loss.inverse_residual_loss_weight=0
314
-
315
- # Physics-informed with varying residual weights
316
- python3 train_inverse.py --config-name={pde_system} \
317
- loss.inverse_residual_loss_weight={weight}
318
- # Recommended sweep: weight = 1.0, 0.1, 0.01, 0.001, 0.0001
319
- ```
320
-
321
- This allows you to control the balance between data-driven supervision and physics-based regularization for parameter identification.
322
-
323
- ##### 1.2 Test-Time Tailoring (TTT)
324
-
325
- At test time, given an observed PDE trajectory `u_{t-k}, ..., u_t`, the inverse model `f_θ` predicts parameters `φ̂ = f_θ(u_{t-k}, ..., u_t)`.
326
- Test-Time Tailoring (TTT) adapts `f_θ` by minimizing a physics-based self-supervised loss derived from the PDE residual:
327
-
328
- `L_Tailor = ||F_{φ̂}(u_{t-k}, ..., u_t)||² + α * ( ||f_θ(u_{t-k}, ..., u_t) - f_{θ_frozen}(u_{t-k}, ..., u_t)||² / ||f_{θ_frozen}(u_{t-k}, ..., u_t)||² )`
329
-
330
- Here `F_{φ̂}` is a discrete approximation of the PDE operator, and `α` controls the strength of the *anchor loss* that stabilizes adaptation. Optimization is performed for a small number of gradient steps on `L_Tailor`, allowing the model to specialize to new or out-of-distribution parameter regimes at inference time.
331
-
332
- ```bash
333
- # Basic TTT with pre-trained model
334
- python3 train_inverse.py --config-name={pde_system}_ttt \
335
- inverse_model_wandb_run={project_id}/{project_name}/model-{model_id}:{version} \
336
- tailor_anchor_loss_weight={alpha} \
337
- num_tailoring_steps={steps} \
338
- tailoring_optimizer_lr={lr}
339
-
340
- ```
341
-
342
- ---
343
-
344
- #### 2️⃣ Problem Representation and Inductive Bias
345
-
346
- Experiments exploring how to encode physical problems and what architectural inductive biases work best.
347
-
348
- ##### 2.1 Conditioning Strategy: Derivatives vs Raw Solutions
349
-
350
- ```bash
351
- # Derivative conditioning (spatial/temporal derivatives as input)
352
- python3 train_inverse.py --config-name={pde_system} \
353
- model.model_config.paramnet.encoder.use_partials=True
354
-
355
- # Temporal conditioning (raw solution snapshots only)
356
- python3 train_inverse.py --config-name={pde_system} \
357
- model.model_config.paramnet.encoder.use_partials=False
358
- ```
359
-
360
- Derivative conditioning provides explicit gradient information from the frames.
361
-
362
- ##### 2.2 Model Architecture: Inductive Biases
363
-
364
- ```bash
365
- # Fourier Neural Operator (spectral bias)
366
- python3 train_inverse.py --config-name={pde_system} model=fno
367
-
368
- # ResNet (convolutional locality bias)
369
- python3 train_inverse.py --config-name={pde_system} model=resnet
370
-
371
- # scOT - Scalable Operator Transformer (attention-based)
372
- python3 train_inverse.py --config-name={pde_system} model=scot
373
- ```
374
-
375
- ##### 2.3 Temporal Conditioning Frames
376
-
377
- ```bash
378
- # Fourier Neural Operator (spectral bias)
379
- python3 train_inverse.py --config-name={pde_system} n_past={num_frames}
380
-
381
- # num_frames=2,5,10,15,20
382
- ```
383
-
384
-
385
- ---
386
-
387
- #### 3️⃣ Scaling Experiments
388
-
389
- Experiments investigating how performance scales with model capacity, data quantity, and spatial resolution.
390
-
391
- ##### 3.1 Model Capacity Scaling
392
-
393
- ```bash
394
- # Small model: ~500k parameters
395
- python3 train_inverse.py --config-name={pde_system} model=fno_500k
396
-
397
- # Base model: ~5M parameters
398
- python3 train_inverse.py --config-name={pde_system} model=fno
399
-
400
- # Large model: ~50M parameters
401
- python3 train_inverse.py --config-name={pde_system} model=fno_50mil
402
- ```
403
-
404
-
405
- ##### 3.2 Data Quantity Scaling
406
-
407
- ###### 3.2.1 Initial Condition Diversity Scaling
408
- ```bash
409
- python3 train_inverse.py --config-name={pde_system} \
410
- data.frac_ics_per_param={frac}
411
- # Recommended sweep: frac = 0.2, 0.35, 0.5, 0.75
412
- ```
413
-
414
- Only `frac_ics_per_param` percent of initial trajectories per parameter will be sampled during training, allowing you to control the amount of initial condition diversity and study data efficiency across different initial states.
415
-
416
- ###### 3.2.2 Parameter Space Coverage Scaling
417
- ```bash
418
- python3 train_inverse.py --config-name={pde_system} \
419
- data.frac_param_combinations={frac}
420
- # Recommended sweep: frac = 0.2, 0.35, 0.5, 0.75
421
- ```
422
-
423
- Only `frac_param_combinations` percent of parameters from the train set will be sampled, allowing you to control parameter space coverage and understand how model performance scales with the diversity of parameter combinations in the training data.
424
-
425
- ###### 3.2.3 Temporal Horizon Scaling
426
- ```bash
427
- python3 train_inverse.py --config-name={pde_system} \
428
- data.train_window_end_percent={train_end} \
429
- data.test_window_start_percent={test_start}
430
- # Recommended sweeps:
431
- # train_end = 0.25, 0.5, 0.76, 1.0
432
- # test_start = 0.76
433
- ```
434
-
435
- Only the first `train_window_end_percent` of trajectories are used for training, while the last `test_window_start_percent` are used for evaluation. This enables studies on temporal extrapolation and how much temporal dynamics are needed for accurate parameter identification.
436
-
437
- ##### 3.3 Spatial Resolution Scaling
438
-
439
- ```bash
440
- # High-resolution experiments with online downsampling
441
- python3 train_inverse.py --config-name={pde_system} \
442
- high_resolution=True \
443
- data.downsample_factor={factor} \
444
- data.batch_size={batch_size}
445
-
446
- # Example configurations:
447
- # factor=1, 512×512 (full resolution)
448
- # factor=2, 256×256
449
- # factor=4, 128×128
450
- # factor=8, 64×64
451
- ```
452
-
453
- The `HIGH_RESOLUTION_PDE_SPATIAL_SIZE` in `pdeinvbench/utils/types.py` defines the maximum resolution (typically 512×512), and the downsampling factor reduces from this maximum. These experiments help determine how resolution affects identifiability of parameters and whether models trained on low-resolution data can generalize to high-resolution inputs.
454
-
455
- ## Testing
456
-
457
- The `tests/` directory contains validation scripts to verify the correctness of PDE residual computations and numerical implementations.
458
-
459
- ### Test Structure
460
-
461
- - **`test_fluids.py`**: Validates turbulent flow and Navier-Stokes residual computations by comparing PyTorch implementations against NumPy reference implementations
462
- - **`fluids_numpy_reference.py`**: NumPy reference implementations for fluid dynamics operators (stream function, advection, Laplacian)
463
- - **`reaction-diffusion-residuals.py`**: Validates reaction-diffusion residual computations and generates visualization GIFs
464
-
465
- ### Running Tests
466
-
467
- **Standard pytest (skips tests requiring external data):**
468
- ```bash
469
- pytest tests/ -v
470
- ```
471
-
472
- ### Test Validation
473
-
474
- The validation tests verify:
475
- 1. **Numerical accuracy**: Finite difference operators match reference implementations (error < 1e-3)
476
- 2. **PDE residuals**: Ground-truth solutions produce near-zero residuals (typically < 1e-4)
477
- 3. **Operator correctness**: Stream function, advection, Laplacian, and gradient computations
478
- 4. **Batch independence**: No cross-contamination between batch elements
479
-
480
- ### Data Requirements
481
-
482
- Some tests require external HDF5 datasets:
483
- - Tests automatically **skip** (not fail) when data is unavailable
484
- - Suitable for CI/CD environments without large datasets
485
- - For full validation, download datasets following the [Data Guide](DATA_GUIDE.md)
486
-
487
- ### Visualization
488
-
489
- Residual validation scripts generate animated GIFs in `test-images/` showing:
490
- - Temporal evolution of PDE residuals
491
- - Spatial distribution of numerical errors
492
- - Threshold-based error highlighting
493
-
494
-
495
- ## Shape-Checking
496
-
497
- This codebase uses [jaxtyping](https://github.com/google/jaxtyping) for runtime tensor shape validation, which helps catch dimension mismatches.
498
-
499
- **To disable shape checking for faster execution:**
500
- ```bash
501
- # Disable for production runs
502
- export JAXTYPING_DISABLE=1
503
- python train_inverse.py --config-name=2dtf model=fno
504
-
505
- # Or inline
506
- JAXTYPING_DISABLE=1 python train_inverse.py --config-name=2dtf model=fno
507
- ```
508
-
509
- ## Adding a New Dataset
510
-
511
- To add a new PDE system to the benchmark, follow the guide in [Data Guide - Section 5: Adding a New Dataset](DATA_GUIDE.md#5-adding-a-new-dataset).
512
-
513
- ## Adding a New Model
514
-
515
- To add a new encoder architecture (e.g., Transformer, U-Net), follow the guide in [Model Guide - Adding a New Model](MODEL_GUIDE.md#adding-a-new-model).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
configs/1dkdv.yaml DELETED
@@ -1,5 +0,0 @@
1
- name: 1dkdv
2
- defaults:
3
- - _self_
4
- - base
5
- - override system_params: 1dkdv
 
 
 
 
 
 
configs/1dkdv_ttt.yaml DELETED
@@ -1,8 +0,0 @@
1
- name: 1dkdv
2
- defaults:
3
- - _self_
4
- - ttt_base
5
- - override system_params: 1dkdv
6
-
7
- inverse_model_wandb_run: ml-pdes/1dkdv_test_time_tuning/model-4j475b9v:v199
8
- # inverse_model_wandb_run: ml-pdes/time_logging_test/model-tw4k8e8h:best
 
 
 
 
 
 
 
 
 
configs/2ddf.yaml DELETED
@@ -1,8 +0,0 @@
1
- name: 2ddf
2
- defaults:
3
- - base
4
- - _self_
5
- - override callbacks: 2ddf
6
- - override system_params: 2ddf
7
-
8
- n_past: 1
 
 
 
 
 
 
 
 
 
configs/2ddf_ttt.yaml DELETED
@@ -1,9 +0,0 @@
1
- name: 2ddf
2
- defaults:
3
- - ttt_base # Load base first
4
- - _self_ # Then override with this file's values
5
- - override callbacks: 2ddf
6
- - override system_params: 2ddf
7
-
8
- inverse_model_wandb_run: ml-pdes/2ddf_compilation_folded/model-r5fj8hr1:best
9
- n_past: 1
 
 
 
 
 
 
 
 
 
 
configs/2dns.yaml DELETED
@@ -1,5 +0,0 @@
1
- name: 2dns
2
- defaults:
3
- - _self_
4
- - base
5
- - override system_params: 2dns
 
 
 
 
 
 
configs/2dns_ttt.yaml DELETED
@@ -1,10 +0,0 @@
1
- name: 2dns
2
- defaults:
3
- - _self_
4
- - ttt_base
5
- - override system_params: 2dns
6
-
7
-
8
- test_run: true
9
- inverse_model_wandb_run: ml-pdes/tailoring_redone/model-wuhbdlqr:v200
10
- # inverse_model_wandb_run: ml-pdes/time_logging_test/model-8mwjk5v0:best
 
 
 
 
 
 
 
 
 
 
 
configs/2drddu.yaml DELETED
@@ -1,7 +0,0 @@
1
- name: 2drd
2
- defaults:
3
- - _self_
4
- - base
5
- - override system_params: 2drddu
6
-
7
- # Note: params_to_predict is already set to ["Du"] in system_params/2drddu.yaml
 
 
 
 
 
 
 
 
configs/2drddu_ttt.yaml DELETED
@@ -1,12 +0,0 @@
1
- name: 2drd
2
- defaults:
3
- - _self_
4
- - ttt_base
5
- - override system_params: 2drddu
6
-
7
-
8
- test_run: true
9
- inverse_model_wandb_run: ml-pdes/2drddu_compilation/model-lslyzo92:v184 # 100 % ics
10
- # inverse_model_wandb_run: ml-pdes/2drddu_compilation/model-jupsos6p:best # 20 % ics
11
- # inverse_model_wandb_run: ml-pdes/time_logging_test/model-71xuth62:best
12
-
 
 
 
 
 
 
 
 
 
 
 
 
 
configs/2drdk.yaml DELETED
@@ -1,6 +0,0 @@
1
- name: 2drd
2
- defaults:
3
- - _self_
4
- - base
5
- - override system_params: 2drdk
6
-
 
 
 
 
 
 
 
configs/2drdk_ttt.yaml DELETED
@@ -1,10 +0,0 @@
1
- name: 2drd
2
- defaults:
3
- - _self_
4
- - ttt_base
5
- - override system_params: 2drdk
6
-
7
-
8
- test_run: true
9
- inverse_model_wandb_run: ml-pdes/2drdk_compilation/model-30801ssy:v189
10
- # inverse_model_wandb_run: ml-pdes/time_logging_test/model-o2v1e8oa:best
 
 
 
 
 
 
 
 
 
 
 
configs/2dtf.yaml DELETED
@@ -1,6 +0,0 @@
1
- name: 2dtf
2
- defaults:
3
- - _self_
4
- - base
5
- - override system_params: 2dtf
6
-
 
 
 
 
 
 
 
configs/2dtf_ttt.yaml DELETED
@@ -1,11 +0,0 @@
1
- name: 2dtf
2
- defaults:
3
- - _self_
4
- - ttt_base
5
- - override system_params: 2dtf
6
-
7
-
8
- test_run: true
9
- # inverse_model_wandb_run: ml-pdes/2dtf_compilation/model-kjskfseu:v172
10
- inverse_model_wandb_run: ml-pdes/tailoring_redone/model-h6cc91c4:v182
11
- # inverse_model_wandb_run: ml-pdes/time_logging_test/model-irns4x30:best
 
 
 
 
 
 
 
 
 
 
 
 
configs/base.yaml DELETED
@@ -1,19 +0,0 @@
1
- name: base
2
- defaults:
3
- - _self_
4
- - callbacks: base
5
- - model: fno
6
- - lightning_module: base
7
- - logging: base
8
- - loss: relative
9
- - optimizer: adam
10
- - trainer: trainer
11
- - lr_scheduler: cosine
12
- - system_params: Null
13
- - data: base
14
-
15
- n_past: 2
16
- n_future: -1 #doesn't matter for inverse problems
17
- mode: "inverse"
18
- seed: 0
19
- high_resolution: false
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
configs/callbacks/2ddf.yaml DELETED
@@ -1,9 +0,0 @@
1
- - _target_: lightning.pytorch.callbacks.ModelCheckpoint # save model checkpoints
2
- monitor: validation/loss
3
- mode: min
4
- save_last: True
5
- - _target_: lightning.pytorch.callbacks.LearningRateMonitor # log learning rate
6
- logging_interval: epoch
7
- - _target_: pdeinvbench.lightning_modules.logging_callbacks.InverseErrorByTailoringStepCallback # log error by tailoring step
8
- - _target_: pdeinvbench.lightning_modules.logging_callbacks.PDEParamErrorTestTimeTailoringCallback
9
- - _target_: pdeinvbench.lightning_modules.logging_callbacks.PDEParamErrorPlottingCallback # stratify error by PDE parameter
 
 
 
 
 
 
 
 
 
 
configs/callbacks/base.yaml DELETED
@@ -1,10 +0,0 @@
1
- - _target_: pdeinvbench.lightning_modules.logging_callbacks.PDEParamErrorPlottingCallback # stratify error by PDE parameter
2
- - _target_: lightning.pytorch.callbacks.ModelCheckpoint # save model checkpoints
3
- monitor: validation/loss
4
- mode: min
5
- save_last: True
6
- save_top_k: 1
7
- - _target_: lightning.pytorch.callbacks.LearningRateMonitor # log learning rate
8
- logging_interval: epoch
9
- - _target_: pdeinvbench.lightning_modules.logging_callbacks.InverseErrorByTailoringStepCallback # log error by tailoring step
10
- - _target_: pdeinvbench.lightning_modules.logging_callbacks.PDEParamErrorTestTimeTailoringCallback
 
 
 
 
 
 
 
 
 
 
 
configs/data/base.yaml DELETED
@@ -1,112 +0,0 @@
1
- # These will be overridden by child configs
2
- name: "placeholder_inverse"
3
- data_root: "placeholder_path"
4
- train_data_root: ${system_params.train_data_root}
5
- val_data_root: ${system_params.val_data_root}
6
- ood_data_root: ${system_params.ood_data_root}
7
- ood_data_root_extreme: ${system_params.ood_data_root_extreme}
8
- test_data_root: ${system_params.test_data_root}
9
- num_channels: ${system_params.num_channels}
10
- batch_size: 8
11
- dilation: 1
12
- cutoff_first_n_frames: ${system_params.cutoff_first_n_frames}
13
- frac_param_combinations: 1
14
- frac_ics_per_param: 1
15
- random_sample_param: True
16
- downsample_factor: 0
17
- every_nth_window: 10
18
- train_window_start_percent: 0
19
- train_window_end_percent: 1
20
- test_window_start_percent: 0
21
- test_window_end_percent: 1
22
-
23
- pde:
24
- _target_: pdeinvbench.utils.types.PDE
25
- value: ${system_params.pde_name}
26
-
27
- train_dataloader:
28
- _target_: torch.utils.data.DataLoader
29
- dataset:
30
- _target_: pdeinvbench.data.PDE_MultiParam
31
- data_root: ${data.train_data_root}
32
- pde: ${data.pde}
33
- n_past: ${n_past}
34
- train: True
35
- dilation: ${data.dilation}
36
- cutoff_first_n_frames: ${data.cutoff_first_n_frames}
37
- frac_param_combinations: ${data.frac_param_combinations}
38
- frac_ics_per_param: ${data.frac_ics_per_param}
39
- random_sample_param: ${data.random_sample_param}
40
- downsample_factor: ${data.downsample_factor}
41
- every_nth_window: ${data.every_nth_window}
42
- window_start_percent: ${data.train_window_start_percent}
43
- window_end_percent: ${data.train_window_end_percent}
44
- batch_size: ${data.batch_size}
45
- shuffle: True
46
-
47
- val_dataloader:
48
- _target_: torch.utils.data.DataLoader
49
- dataset:
50
- _target_: pdeinvbench.data.PDE_MultiParam
51
- data_root: ${data.val_data_root}
52
- pde: ${data.pde}
53
- n_past: ${n_past}
54
- train: False
55
- dilation: ${data.dilation}
56
- cutoff_first_n_frames: ${data.cutoff_first_n_frames}
57
- frac_param_combinations: ${data.frac_param_combinations}
58
- frac_ics_per_param: ${data.frac_ics_per_param}
59
- random_sample_param: ${data.random_sample_param}
60
- downsample_factor: ${data.downsample_factor}
61
- every_nth_window: ${data.every_nth_window}
62
- window_start_percent: ${data.train_window_start_percent}
63
- window_end_percent: ${data.train_window_end_percent}
64
- batch_size: ${data.batch_size}
65
- shuffle: False
66
-
67
- ood_dataloader:
68
- _target_: torch.utils.data.DataLoader
69
- dataset:
70
- _target_: pdeinvbench.data.PDE_MultiParam
71
- data_root: ${data.ood_data_root}
72
- pde: ${data.pde}
73
- n_past: ${n_past}
74
- train: False
75
- dilation: ${data.dilation}
76
- cutoff_first_n_frames: ${data.cutoff_first_n_frames}
77
- downsample_factor: ${data.downsample_factor}
78
- every_nth_window: ${data.every_nth_window}
79
- batch_size: ${data.batch_size}
80
- shuffle: False
81
-
82
- ood_dataloader_extreme:
83
- _target_: torch.utils.data.DataLoader
84
- dataset:
85
- _target_: pdeinvbench.data.PDE_MultiParam
86
- data_root: ${data.ood_data_root_extreme}
87
- pde: ${data.pde}
88
- n_past: ${n_past}
89
- train: False
90
- dilation: ${data.dilation}
91
- cutoff_first_n_frames: ${data.cutoff_first_n_frames}
92
- downsample_factor: ${data.downsample_factor}
93
- every_nth_window: ${data.every_nth_window}
94
- batch_size: ${data.batch_size}
95
- shuffle: False
96
-
97
- test_dataloader:
98
- _target_: torch.utils.data.DataLoader
99
- dataset:
100
- _target_: pdeinvbench.data.PDE_MultiParam
101
- data_root: ${data.test_data_root}
102
- pde: ${data.pde}
103
- n_past: ${n_past}
104
- train: False
105
- dilation: ${data.dilation}
106
- cutoff_first_n_frames: ${data.cutoff_first_n_frames}
107
- downsample_factor: ${data.downsample_factor}
108
- every_nth_window: ${data.every_nth_window}
109
- window_start_percent: ${data.test_window_start_percent}
110
- window_end_percent: ${data.test_window_end_percent}
111
- batch_size: ${data.batch_size}
112
- shuffle: False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
configs/lightning_module/base.yaml DELETED
@@ -1,10 +0,0 @@
1
- _target_: pdeinvbench.lightning_modules.InverseModule
2
- pde: ${data.pde}
3
- n_past: ${n_past}
4
- batch_size: ${data.batch_size}
5
- use_partials: ${model.model_config.paramnet.encoder.use_partials}
6
- params_to_predict: ${model.model_config.paramnet.params_to_predict}
7
- param_loss_metric: ${loss.param_loss_metric}
8
- inverse_residual_loss_weight: ${loss.inverse_residual_loss_weight}
9
- inverse_param_loss_weight: ${loss.inverse_param_loss_weight}
10
- residual_filter: False
 
 
 
 
 
 
 
 
 
 
 
configs/lightning_module/ttt.yaml DELETED
@@ -1,8 +0,0 @@
1
- defaults:
2
- - base
3
-
4
- _target_: pdeinvbench.lightning_modules.InverseTestTimeTailoringModule
5
- num_tailoring_steps: ${num_tailoring_steps}
6
- tailor_per_batch: ${tailor_per_batch}
7
- tailor_anchor_loss_weight: ${tailor_anchor_loss_weight}
8
- tailor_residual_loss_weight: ${tailor_residual_loss_weight}
 
 
 
 
 
 
 
 
 
configs/logging/base.yaml DELETED
@@ -1,4 +0,0 @@
1
- _target_: pdeinvbench.utils.logging_utils.CustomWandbLogger
2
- entity: "ml-pdes"
3
- save_dir: "logs"
4
- project: ${data.name}
 
 
 
 
 
configs/loss/mse.yaml DELETED
@@ -1,5 +0,0 @@
1
- param_loss_metric:
2
- _target_: pdeinvbench.utils.types.ParamMetrics
3
- value: "Mean Squared Error"
4
- inverse_residual_loss_weight: 0
5
- inverse_param_loss_weight: 1
 
 
 
 
 
 
configs/loss/relative.yaml DELETED
@@ -1,5 +0,0 @@
1
- param_loss_metric:
2
- _target_: pdeinvbench.utils.types.ParamMetrics
3
- value: "Relative Error"
4
- inverse_residual_loss_weight: 0
5
- inverse_param_loss_weight: 1
 
 
 
 
 
 
configs/lr_scheduler/cosine.yaml DELETED
@@ -1,2 +0,0 @@
1
- _target_: torch.optim.lr_scheduler.CosineAnnealingLR
2
- T_max: ${trainer.max_epochs}
 
 
 
configs/model/fno.yaml DELETED
@@ -1,36 +0,0 @@
1
- # Shared FNO model configuration
2
- # Interpolates ALL parameters from system_params
3
- name: "${system_params.name}_fno"
4
- dropout: ${system_params.fno_dropout}
5
- hidden_channels: ${system_params.fno_hidden_channels}
6
- encoder_layers: ${system_params.fno_encoder_layers}
7
- downsampler_layers: ${system_params.fno_downsampler_layers}
8
- mlp_layers: ${system_params.fno_mlp_layers}
9
-
10
- model_config:
11
- _target_: pdeinvbench.models.inverse_model.InverseModel
12
- paramnet:
13
- _target_: pdeinvbench.models.inverse_model.ParameterNet
14
- pde: ${data.pde}
15
- normalize: ${system_params.normalize}
16
- logspace: ${system_params.logspace}
17
- params_to_predict: ${system_params.params_to_predict}
18
- mlp_type: ${system_params.mlp_type}
19
- encoder:
20
- _target_: pdeinvbench.models.encoder.FNOEncoder
21
- n_modes: ${system_params.fno_n_modes}
22
- n_past: ${n_past}
23
- n_future: ${n_future}
24
- n_layers: ${model.encoder_layers}
25
- data_channels: ${data.num_channels}
26
- hidden_channels: ${model.hidden_channels}
27
- use_partials: True
28
- pde: ${data.pde}
29
- batch_size: ${data.batch_size}
30
- downsampler: ${system_params.fno_downsampler}
31
- mlp_hidden_size: ${model.hidden_channels}
32
- mlp_layers: ${model.mlp_layers}
33
- mlp_activation: "relu"
34
- mlp_dropout: ${model.dropout}
35
- downsample_factor: ${data.downsample_factor}
36
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
configs/model/fno_50k.yaml DELETED
@@ -1,9 +0,0 @@
1
- # Small FNO variant (500k params)
2
- # Inherits structure from fno.yaml, only overrides size parameters
3
- defaults:
4
- - fno
5
-
6
- name: "${system_params.name}_fno_50k"
7
- hidden_channels: ${system_params.fno_hidden_channels_50k}
8
- encoder_layers: ${system_params.fno_encoder_layers_50k}
9
-
 
 
 
 
 
 
 
 
 
 
configs/model/fno_50mil.yaml DELETED
@@ -1,9 +0,0 @@
1
- # Large FNO variant (50 million params)
2
- # Inherits structure from fno.yaml, only overrides size parameters
3
- defaults:
4
- - fno
5
-
6
- name: "${system_params.name}_fno_50mil"
7
- hidden_channels: ${system_params.fno_hidden_channels_50mil}
8
- encoder_layers: ${system_params.fno_encoder_layers_50mil}
9
-
 
 
 
 
 
 
 
 
 
 
configs/model/resnet.yaml DELETED
@@ -1,35 +0,0 @@
1
- # Shared ResNet model configuration
2
- # Interpolates ALL parameters from system_params
3
- name: "${system_params.name}_resnet"
4
- dropout: ${system_params.resnet_dropout}
5
- hidden_channels: ${system_params.resnet_hidden_channels}
6
- encoder_layers: ${system_params.resnet_encoder_layers}
7
- downsampler_layers: ${system_params.resnet_downsampler_layers}
8
- mlp_layers: ${system_params.resnet_mlp_layers}
9
-
10
- model_config:
11
- _target_: pdeinvbench.models.inverse_model.InverseModel
12
- paramnet:
13
- _target_: pdeinvbench.models.inverse_model.ParameterNet
14
- pde: ${data.pde}
15
- normalize: ${system_params.normalize}
16
- logspace: ${system_params.logspace}
17
- params_to_predict: ${system_params.params_to_predict}
18
- mlp_type: ${system_params.mlp_type}
19
- encoder:
20
- _target_: pdeinvbench.models.encoder.ResnetEncoder
21
- n_past: ${n_past}
22
- n_future: ${n_future}
23
- n_layers: ${model.encoder_layers}
24
- data_channels: ${data.num_channels}
25
- hidden_channels: ${model.hidden_channels}
26
- use_partials: True
27
- pde: ${data.pde}
28
- batch_size: ${data.batch_size}
29
- downsampler: ${system_params.resnet_downsampler}
30
- mlp_hidden_size: ${model.hidden_channels}
31
- mlp_layers: ${model.mlp_layers}
32
- mlp_activation: "relu"
33
- mlp_dropout: ${model.dropout}
34
- downsample_factor: ${data.downsample_factor}
35
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
configs/model/scot.yaml DELETED
@@ -1,41 +0,0 @@
1
- # Shared ScOT model configuration
2
- # Interpolates ALL parameters from system_params
3
- name: "${system_params.name}_scot"
4
- dropout: ${system_params.scot_dropout}
5
- hidden_channels: ${system_params.scot_hidden_channels}
6
- encoder_layers: ${system_params.scot_encoder_layers}
7
- downsampler_layers: ${system_params.scot_downsampler_layers}
8
- mlp_layers: ${system_params.scot_mlp_layers}
9
-
10
- model_config:
11
- _target_: pdeinvbench.models.inverse_model.InverseModel
12
- paramnet:
13
- _target_: pdeinvbench.models.inverse_model.ParameterNet
14
- pde: ${data.pde}
15
- normalize: ${system_params.normalize}
16
- logspace: ${system_params.logspace}
17
- params_to_predict: ${system_params.params_to_predict}
18
- mlp_type: ${system_params.mlp_type}
19
- encoder:
20
- _target_: pdeinvbench.models.encoder.ScOTEncoder
21
- embed_dim: ${system_params.scot_embed_dim}
22
- n_layers: ${model.encoder_layers}
23
- hidden_size: ${system_params.scot_hidden_size}
24
- patch_size: ${system_params.scot_patch_size}
25
- num_heads: ${system_params.scot_num_heads}
26
- skip_connections: ${system_params.scot_skip_connections}
27
- depths: ${system_params.scot_depths}
28
- n_past: ${n_past}
29
- n_future: ${n_future}
30
- use_partials: True
31
- data_channels: ${data.num_channels}
32
- pde: ${data.pde}
33
- batch_size: ${data.batch_size}
34
- downsampler: ${system_params.scot_downsampler}
35
- mlp_hidden_size: ${system_params.scot_mlp_hidden_size}
36
- mlp_layers: ${model.mlp_layers}
37
- mlp_activation: "relu"
38
- mlp_dropout: ${model.dropout}
39
- condition_on_time: ${system_params.scot_condition_on_time}
40
- downsample_factor: ${data.downsample_factor}
41
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
configs/optimizer/adam.yaml DELETED
@@ -1,2 +0,0 @@
1
- _target_: torch.optim.Adam
2
- lr: 0.0001
 
 
 
configs/system_params/1dkdv.yaml DELETED
@@ -1,17 +0,0 @@
1
- # ============================================
2
- # 1DKDV SYSTEM PARAMETERS
3
- # ============================================
4
- defaults:
5
- - base
6
-
7
- # ============ Data Parameters ============
8
- name: "1dkdv_inverse"
9
- data_root: "/data/shared/meta-pde/folded_data/kdv/fold_2"
10
- pde_name: "Korteweg-de Vries 1D"
11
- num_channels: 1
12
- cutoff_first_n_frames: 0
13
-
14
- # ============ Model Parameters ============
15
- downsampler_input_dim: 1 # 1D system
16
- params_to_predict: ["delta"]
17
- normalize: True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
configs/system_params/2ddf.yaml DELETED
@@ -1,27 +0,0 @@
1
- # ============================================
2
- # 2DDF SYSTEM PARAMETERS
3
- # ============================================
4
- defaults:
5
- - base
6
-
7
- # ============ Data Parameters ============
8
- name: "2ddf_inverse"
9
- data_root: "/data/shared/meta-pde/darcy-flow/r241_folded/"
10
- pde_name: "Darcy Flow 2D"
11
- num_channels: 1
12
- cutoff_first_n_frames: 0
13
-
14
- # ============ Model Parameters ============
15
- params_to_predict: ["coeff"]
16
- normalize: False
17
- mlp_type: "conv" # Special: 2ddf uses conv MLP
18
-
19
- # Override downsamplers: 2ddf uses IdentityMap instead of ConvDownsampler
20
- fno_downsampler:
21
- _target_: pdeinvbench.models.downsampler.IdentityMap
22
-
23
- resnet_downsampler:
24
- _target_: pdeinvbench.models.downsampler.IdentityMap
25
-
26
- scot_downsampler:
27
- _target_: pdeinvbench.models.downsampler.IdentityMap
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
configs/system_params/2dns.yaml DELETED
@@ -1,16 +0,0 @@
1
- # ============================================
2
- # 2DNS SYSTEM PARAMETERS
3
- # ============================================
4
- defaults:
5
- - base
6
-
7
- # ============ Data Parameters ============
8
- name: "2dns_inverse"
9
- data_root: "/data/shared/meta-pde/sampled_parameters_split/navierstokes64"
10
- pde_name: "Navier Stokes 2D"
11
- num_channels: 1
12
- cutoff_first_n_frames: 0
13
-
14
- # ============ Model Parameters ============
15
- params_to_predict: ["re"]
16
- normalize: False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
configs/system_params/2drddu.yaml DELETED
@@ -1,9 +0,0 @@
1
- # ============================================
2
- # 2DRD-DU SYSTEM PARAMETERS
3
- # ============================================
4
- defaults:
5
- - 2drdk
6
- data_root: "/data/shared/meta-pde/folded_data/reaction-diffusion-2d/Du_fold_2"
7
-
8
- # ============ Model Parameters ============
9
- params_to_predict: ["Du"]
 
 
 
 
 
 
 
 
 
 
configs/system_params/2drdk.yaml DELETED
@@ -1,18 +0,0 @@
1
- # ============================================
2
- # 2DRD-K SYSTEM PARAMETERS
3
- # ============================================
4
- defaults:
5
- - base
6
-
7
- # ============ Data Parameters ============
8
- name: "2drdk_inverse"
9
- data_root: "/data/shared/meta-pde/folded_data/reaction-diffusion-2d/k_fold_2"
10
- pde_name: "Reaction Diffusion 2D"
11
- num_channels: 2
12
- cutoff_first_n_frames: 2
13
- # Special override for corner extreme OOD
14
- ood_data_root_extreme: ${system_params.data_root}/out_of_distribution_corner_extreme
15
-
16
- # ============ Model Parameters ============
17
- params_to_predict: ["k"]
18
- normalize: False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
configs/system_params/2dtf.yaml DELETED
@@ -1,16 +0,0 @@
1
- # ============================================
2
- # 2DTF SYSTEM PARAMETERS
3
- # ============================================
4
- defaults:
5
- - base
6
-
7
- # ============ Data Parameters ============
8
- name: "2dtf_inverse"
9
- data_root: "/data/shared/meta-pde/folded_data/turbulent-flow-2d/fold_2"
10
- pde_name: "Turbulent Flow 2D"
11
- num_channels: 1
12
- cutoff_first_n_frames: 0
13
-
14
- # ============ Model Parameters ============
15
- params_to_predict: ["nu"]
16
- normalize: True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
configs/system_params/base.yaml DELETED
@@ -1,91 +0,0 @@
1
- # Base system parameters
2
- # Defines common structure and defaults for BOTH data AND model
3
- # Each system inherits this and overrides specific values
4
-
5
- # ============ Data Parameters ============
6
- name: "placeholder_inverse"
7
- data_root: "placeholder_path"
8
- train_data_root: ${system_params.data_root}/train
9
- val_data_root: ${system_params.data_root}/validation
10
- ood_data_root: ${system_params.data_root}/out_of_distribution
11
- ood_data_root_extreme: ${system_params.data_root}/out_of_distribution_extreme
12
- test_data_root: ${system_params.data_root}/test
13
- pde_name: "placeholder_pde"
14
- num_channels: 1
15
- cutoff_first_n_frames: 0
16
-
17
- # ============ Model - System-Specific Parameters ============
18
- params_to_predict: []
19
- normalize: False
20
- logspace: False
21
- mlp_type: "mlp" # Default to standard MLP (2ddf overrides to "conv")
22
- downsampler_input_dim: 2 # 1 for 1D systems, 2 for 2D systems
23
-
24
- # ============ FNO Architecture ============
25
- fno_hidden_channels: 64
26
- fno_encoder_layers: 4
27
- fno_downsampler_layers: 4
28
- fno_dropout: 0
29
- fno_mlp_layers: 1
30
- fno_n_modes: 16
31
-
32
- fno_hidden_channels_50k: 16
33
- fno_encoder_layers_50k: 6
34
-
35
- fno_hidden_channels_50mil: 200
36
- fno_encoder_layers_50mil: 4
37
-
38
- fno_downsampler:
39
- _target_: pdeinvbench.models.downsampler.ConvDownsampler
40
- input_dimension: ${system_params.downsampler_input_dim}
41
- n_layers: ${model.downsampler_layers}
42
- in_channels: ${model.hidden_channels}
43
- out_channels: ${model.hidden_channels}
44
- kernel_size: 3
45
- stride: 1
46
- padding: 2
47
- dropout: ${model.dropout}
48
-
49
- # ============ ResNet Architecture ============
50
- resnet_hidden_channels: 128
51
- resnet_encoder_layers: 13
52
- resnet_downsampler_layers: 4
53
- resnet_dropout: 0
54
- resnet_mlp_layers: 1
55
-
56
- resnet_downsampler:
57
- _target_: pdeinvbench.models.downsampler.ConvDownsampler
58
- input_dimension: ${system_params.downsampler_input_dim}
59
- n_layers: ${model.downsampler_layers}
60
- in_channels: ${model.hidden_channels}
61
- out_channels: ${model.hidden_channels}
62
- kernel_size: 3
63
- stride: 1
64
- padding: 2
65
- dropout: ${model.dropout}
66
-
67
- # ============ ScOT Architecture ============
68
- scot_hidden_channels: 32
69
- scot_encoder_layers: 4
70
- scot_downsampler_layers: 4
71
- scot_dropout: 0
72
- scot_mlp_layers: 1
73
- scot_mlp_hidden_size: 32
74
- scot_condition_on_time: False
75
- scot_embed_dim: 36
76
- scot_hidden_size: 32
77
- scot_patch_size: 4
78
- scot_num_heads: [3, 6, 12, 24]
79
- scot_skip_connections: [2, 2, 2, 2]
80
- scot_depths: [1, 1, 1, 1]
81
-
82
- scot_downsampler:
83
- _target_: pdeinvbench.models.downsampler.ConvDownsampler
84
- input_dimension: ${system_params.downsampler_input_dim}
85
- n_layers: ${model.downsampler_layers}
86
- in_channels: ${model.hidden_channels}
87
- out_channels: ${model.hidden_channels}
88
- kernel_size: 3
89
- stride: 1
90
- padding: 2
91
- dropout: ${model.dropout}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
configs/tailoring_optimizer/adam.yaml DELETED
@@ -1,2 +0,0 @@
1
- _target_: torch.optim.Adam
2
- lr: ${tailoring_optimizer_lr}
 
 
 
configs/tailoring_optimizer/sgd.yaml DELETED
@@ -1,2 +0,0 @@
1
- _target_: torch.optim.SGD
2
- lr: ${tailoring_optimizer_lr}
 
 
 
configs/trainer/trainer.yaml DELETED
@@ -1,4 +0,0 @@
1
- _target_: lightning.Trainer
2
- max_epochs: 200
3
- log_every_n_steps: 10
4
- callbacks: ${callbacks}
 
 
 
 
 
configs/ttt_base.yaml DELETED
@@ -1,14 +0,0 @@
1
- name: ttt_base
2
- defaults:
3
- - _self_
4
- - base
5
- - tailoring_optimizer: adam
6
- - override lightning_module: ttt
7
-
8
- test_run: true
9
-
10
- tailor_anchor_loss_weight: 1
11
- tailor_residual_loss_weight: 1
12
- tailor_per_batch: True
13
- num_tailoring_steps: 50
14
- tailoring_optimizer_lr: 0.00001
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
environment.yml DELETED
@@ -1,158 +0,0 @@
1
- name: inv-env
2
- channels:
3
- - defaults
4
- - conda-forge
5
- dependencies:
6
- - _libgcc_mutex=0.1=main
7
- - _openmp_mutex=5.1=1_gnu
8
- - bzip2=1.0.8=h5eee18b_6
9
- - ca-certificates=2025.2.25=h06a4308_0
10
- - ld_impl_linux-64=2.40=h12ee557_0
11
- - libffi=3.4.4=h6a678d5_1
12
- - libgcc-ng=11.2.0=h1234567_1
13
- - libgomp=11.2.0=h1234567_1
14
- - libstdcxx-ng=11.2.0=h1234567_1
15
- - libuuid=1.41.5=h5eee18b_0
16
- - ncurses=6.4=h6a678d5_0
17
- - openssl=1.1.1w=h7f8727e_0
18
- - pip=25.1=pyhc872135_2
19
- - python=3.11.0=h7a1cb2a_3
20
- - readline=8.2=h5eee18b_0
21
- - setuptools=78.1.1=py311h06a4308_0
22
- - sqlite=3.45.3=h5eee18b_0
23
- - tk=8.6.14=h39e8969_0
24
- - wheel=0.45.1=py311h06a4308_0
25
- - xz=5.6.4=h5eee18b_1
26
- - zlib=1.2.13=h5eee18b_1
27
- - pip:
28
- - accelerate==0.31.0
29
- - aiohappyeyeballs==2.6.1
30
- - aiohttp==3.11.18
31
- - aiosignal==1.3.2
32
- - annotated-types==0.7.0
33
- - antlr4-python3-runtime==4.9.3
34
- - appdirs==1.4.4
35
- - attrs==25.3.0
36
- - black==25.1.0
37
- - certifi==2025.4.26
38
- - charset-normalizer==3.4.2
39
- - click==8.2.0
40
- - cmake==4.2.1
41
- - configmypy==0.2.0
42
- - contourpy==1.3.2
43
- - crc32c==2.7.1
44
- - cycler==0.12.1
45
- - decorator==5.2.1
46
- - docker-pycreds==0.4.0
47
- - donfig==0.8.1.post1
48
- - filelock==3.18.0
49
- - fonttools==4.58.0
50
- - frozenlist==1.6.0
51
- - fsspec==2025.3.2
52
- - gitdb==4.0.12
53
- - gitpython==3.1.44
54
- - h5py==3.13.0
55
- - huggingface-hub==0.31.2
56
- - hydra-core==1.3.2
57
- - idna==3.10
58
- - imageio==2.37.0
59
- - imageio-ffmpeg==0.6.0
60
- - iniconfig==2.1.0
61
- - jaxtyping==0.3.2
62
- - jinja2==3.1.6
63
- - kiwisolver==1.4.8
64
- - lightning==2.5.1.post0
65
- - lightning-utilities==0.14.3
66
- - lit==18.1.8
67
- - markupsafe==3.0.2
68
- - matplotlib==3.10.3
69
- - moviepy==2.1.2
70
- - mpmath==1.3.0
71
- - multidict==6.4.3
72
- - mypy-extensions==1.1.0
73
- - narwhals==1.39.1
74
- - networkx==3.4.2
75
- - neuraloperator==0.3.0
76
- - numcodecs==0.16.0
77
- - numpy==2.2.5
78
- - nvidia-cublas-cu11==11.10.3.66
79
- - nvidia-cublas-cu12==12.6.4.1
80
- - nvidia-cuda-cupti-cu11==11.7.101
81
- - nvidia-cuda-cupti-cu12==12.6.80
82
- - nvidia-cuda-nvrtc-cu11==11.7.99
83
- - nvidia-cuda-nvrtc-cu12==12.6.77
84
- - nvidia-cuda-runtime-cu11==11.7.99
85
- - nvidia-cuda-runtime-cu12==12.6.77
86
- - nvidia-cudnn-cu11==8.5.0.96
87
- - nvidia-cudnn-cu12==9.5.1.17
88
- - nvidia-cufft-cu11==10.9.0.58
89
- - nvidia-cufft-cu12==11.3.0.4
90
- - nvidia-cufile-cu12==1.11.1.6
91
- - nvidia-curand-cu11==10.2.10.91
92
- - nvidia-curand-cu12==10.3.7.77
93
- - nvidia-cusolver-cu11==11.4.0.1
94
- - nvidia-cusolver-cu12==11.7.1.2
95
- - nvidia-cusparse-cu11==11.7.4.91
96
- - nvidia-cusparse-cu12==12.5.4.2
97
- - nvidia-cusparselt-cu12==0.6.3
98
- - nvidia-nccl-cu11==2.14.3
99
- - nvidia-nccl-cu12==2.26.2
100
- - nvidia-nvjitlink-cu12==12.6.85
101
- - nvidia-nvtx-cu11==11.7.91
102
- - nvidia-nvtx-cu12==12.6.77
103
- - omegaconf==2.3.0
104
- - opt-einsum==3.4.0
105
- - packaging==24.2
106
- - pandas==2.2.3
107
- - pathspec==0.12.1
108
- - pathtools==0.1.2
109
- - pillow==10.4.0
110
- - platformdirs==4.3.8
111
- - plotly==6.1.0
112
- - pluggy==1.6.0
113
- - proglog==0.1.12
114
- - propcache==0.3.1
115
- - protobuf==4.25.8
116
- - psutil==7.0.0
117
- - pydantic==2.11.4
118
- - pydantic-core==2.33.2
119
- - pyparsing==3.2.3
120
- - pytest==8.3.5
121
- - pytest-mock==3.14.0
122
- - python-dateutil==2.9.0.post0
123
- - python-dotenv==1.1.0
124
- - pytorch-lightning==2.5.1.post0
125
- - pytz==2025.2
126
- - pyyaml==6.0.2
127
- - regex==2024.11.6
128
- - requests==2.32.3
129
- - ruamel-yaml==0.18.10
130
- - ruamel-yaml-clib==0.2.12
131
- - safetensors==0.5.3
132
- - scipy==1.15.3
133
- - scoringrules==0.7.1
134
- - scot==1.0.0
135
- - sentry-sdk==2.28.0
136
- - setproctitle==1.3.6
137
- - six==1.17.0
138
- - smmap==5.0.2
139
- - sympy==1.14.0
140
- - tensorly==0.9.0
141
- - tensorly-torch==0.5.0
142
- - tokenizers==0.13.3
143
- - torch==2.0.1
144
- - torch-harmonics==0.7.3
145
- - torchmetrics==1.7.1
146
- - torchvision==0.15.2
147
- - tqdm==4.67.1
148
- - transformers==4.29.2
149
- - triton==2.0.0
150
- - typeguard==2.13.3
151
- - typing-extensions==4.13.2
152
- - typing-inspection==0.4.0
153
- - tzdata==2025.2
154
- - urllib3==2.4.0
155
- - wadler-lindig==0.1.6
156
- - wandb==0.14.2
157
- - yarl==1.20.0
158
- - zarr==3.0.7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fluid_stats.py DELETED
@@ -1,418 +0,0 @@
- #!/usr/bin/env python3
- """
- Compute energy spectra from vorticity field data.
-
- This script loads vorticity trajectory data from a .npy file and computes
- the azimuthally averaged energy spectrum E(k). It outputs both the spectrum
- data as a .npz file and a visualization plot as a .png file.
-
- To run direct numerical simulations and get fluid fields, please use Jax-CFD: https://github.com/google/jax-cfd
- Commit hash we used: 0c17e3855702f884265b97bd6ff0793c34f3155e
-
- Usage:
-     uv run python fluid_stats.py path/to/vorticity.npy --out_dir results/
- """
-
- import argparse
- import logging
- import os
- from functools import partial
-
- import jax
- import jax.numpy as jnp
- import matplotlib.pyplot as plt
- import numpy as np
- from jax import jit, vmap
- from tqdm import tqdm
-
- # Configure logging
- logging.basicConfig(
-     level=logging.INFO,
-     format="%(asctime)s - %(levelname)s - %(message)s",
-     datefmt="%Y-%m-%d %H:%M:%S",
- )
- logger = logging.getLogger(__name__)
-
-
- # =============================================================================
- # Core computation functions
- # =============================================================================
-
-
- @jit
- def vorticity_to_velocity(vorticity):
-     """
-     Convert vorticity to velocity components using the streamfunction.
-
-     Solves the Poisson equation in Fourier space: psi_hat = -vorticity_hat / k^2
-     Then computes velocity from streamfunction: u_x = -d(psi)/dy, u_y = d(psi)/dx
-
-     Parameters
-     ----------
-     vorticity : jnp.ndarray, shape (X, Y)
-         2D vorticity field on a square grid.
-
-     Returns
-     -------
-     u_x : jnp.ndarray, shape (X, Y)
-         x-component of velocity.
-     u_y : jnp.ndarray, shape (X, Y)
-         y-component of velocity.
-     """
-     N = vorticity.shape[0]
-
-     # Compute streamfunction from vorticity using Poisson equation
-     # In Fourier space: psi_hat = -vorticity_hat / k^2
-     vort_hat = jnp.fft.fft2(vorticity)
-
-     # Create wavenumber arrays
-     kx = jnp.fft.fftfreq(N, d=1.0) * 2 * jnp.pi
-     ky = jnp.fft.fftfreq(N, d=1.0) * 2 * jnp.pi
-     KX, KY = jnp.meshgrid(kx, ky, indexing="ij")
-     K2 = KX**2 + KY**2
-
-     # Avoid division by zero at k=0
-     K2 = K2.at[0, 0].set(1.0)
-     psi_hat = -vort_hat / K2
-     psi_hat = psi_hat.at[0, 0].set(0.0)  # Set mean streamfunction to zero
-
-     # Compute velocity components from streamfunction
-     # u_x = -d(psi)/dy, u_y = d(psi)/dx
-     u_x_hat = -1j * KY * psi_hat
-     u_y_hat = 1j * KX * psi_hat
-
-     u_x = jnp.real(jnp.fft.ifft2(u_x_hat))
-     u_y = jnp.real(jnp.fft.ifft2(u_y_hat))
-
-     return u_x, u_y
-
-
- @partial(jit, static_argnames=["k_max"])
- def energy_spectrum_single(u_x, u_y, k_max=None):
-     """
-     Compute azimuthally averaged energy spectrum E(k) for a single velocity field.
-
-     The energy spectrum is computed by binning the 2D Fourier-transformed
-     velocity field by wavenumber magnitude |k|.
-
-     Parameters
-     ----------
-     u_x : jnp.ndarray, shape (X, Y)
-         x-component of velocity.
-     u_y : jnp.ndarray, shape (X, Y)
-         y-component of velocity.
-     k_max : int, optional
-         Maximum wavenumber to compute. If None, uses N//3 (2/3 dealiasing rule).
-
-     Returns
-     -------
-     E : jnp.ndarray, shape (k_max+1,)
-         Energy spectrum E(k) for k = 0, 1, ..., k_max.
-     """
-     N = u_x.shape[0]
-
-     # FFT, shifted so k=0 is at centre
-     Ux = jnp.fft.fftshift(jnp.fft.fft2(u_x))
-     Ux = Ux / (N**2)
-     Uy = jnp.fft.fftshift(jnp.fft.fft2(u_y))
-     Uy = Uy / (N**2)
-
-     # Integer wave numbers
-     kx = jnp.fft.fftshift(jnp.fft.fftfreq(N)) * N
-     ky = kx
-     KX, KY = jnp.meshgrid(kx, ky)
-     K = jnp.hypot(KX, KY).astype(jnp.int32)
-
-     if k_max is None:  # Nyquist under 2/3 de-alias
-         k_max = N // 3
-
-     # Vectorized computation of energy spectrum
-     def compute_E_k(k):
-         mask = K == k
-         return 0.5 * jnp.sum(jnp.abs(Ux) ** 2 * mask + jnp.abs(Uy) ** 2 * mask)
-
-     k_vals = jnp.arange(k_max + 1)
-     E = vmap(compute_E_k)(k_vals)
-
-     return E
-
-
- @partial(jit, static_argnames=["k_max"])
- def energy_spectrum_from_vorticity(vorticity, k_max=None):
-     """
-     Compute energy spectrum from vorticity field using vmap.
-
-     Suitable for moderate resolution fields (up to ~1024x1024).
-     For larger resolutions, use energy_spectrum_from_vorticity_lax_map.
-
-     Parameters
-     ----------
-     vorticity : jnp.ndarray, shape (T, X, Y)
-         Vorticity field over T time steps on an X x Y grid.
-     k_max : int, optional
-         Maximum wavenumber. If None, uses N//3 (2/3 dealiasing rule).
-
-     Returns
-     -------
-     E : jnp.ndarray, shape (T, k_max+1)
-         Energy spectrum for each time step.
-     """
-     N = vorticity.shape[1]
-
-     if k_max is None:
-         k_max = N // 3
-
-     def process_timestep(vort_t):
-         u_x, u_y = vorticity_to_velocity(vort_t)
-         return energy_spectrum_single(u_x, u_y, k_max)
-
-     # Vectorize over time dimension
-     E = vmap(process_timestep)(vorticity)
-
-     return E
-
-
- @partial(jit, static_argnames=["k_max", "batch_size"])
- def energy_spectrum_from_vorticity_lax_map(vorticity, k_max=None, batch_size=16):
-     """
-     Compute energy spectrum from vorticity field using jax.lax.map.
-
-     Memory-efficient version suitable for high resolution fields (>1024x1024).
-     Processes timesteps sequentially to reduce memory footprint.
-
-     Parameters
-     ----------
-     vorticity : jnp.ndarray, shape (T, X, Y)
-         Vorticity field over T time steps on an X x Y grid.
-     k_max : int, optional
-         Maximum wavenumber. If None, uses N//3 (2/3 dealiasing rule).
-     batch_size : int, optional
-         Batch size for lax.map processing. Default is 16.
-
-     Returns
-     -------
-     E : jnp.ndarray, shape (T, k_max+1)
-         Energy spectrum for each time step.
-     """
-     N = vorticity.shape[1]
-
-     if k_max is None:
-         k_max = N // 3
-
-     def process_timestep(vort_t):
-         u_x, u_y = vorticity_to_velocity(vort_t)
-         return energy_spectrum_single(u_x, u_y, k_max)
-
-     # Use lax.map instead of vmap for memory efficiency
-     E = jax.lax.map(process_timestep, vorticity, batch_size=batch_size)
-
-     return E
-
-
- # =============================================================================
- # Main script
- # =============================================================================
-
-
- def parse_args():
-     """Parse command line arguments."""
-     parser = argparse.ArgumentParser(
-         description=(
-             "Compute energy spectra from 2D vorticity trajectory data. "
-             "Loads vorticity fields from a .npy file, computes the azimuthally "
-             "averaged energy spectrum E(k), and saves both the spectrum data "
-             "and a visualization plot."
-         ),
-         formatter_class=argparse.RawDescriptionHelpFormatter,
-         epilog="""
- Examples:
-     uv run python fluid_stats.py simulation.npy
-     uv run python fluid_stats.py data/vorticity.npy --out_dir results/
-
- Input format:
-     The input .npy file should contain a 4D array with shape (batch, time, X, Y)
-     where batch is the number of independent trajectories, time is the number
-     of snapshots, and X, Y are the spatial grid dimensions.
- """,
-     )
-
-     parser.add_argument(
-         "input_file",
-         type=str,
-         help=(
-             "Path to the input .npy file containing vorticity data. "
-             "Expected shape: (batch, time, X, Y) where X and Y are the "
-             "spatial grid dimensions (must be square, i.e., X == Y)."
-         ),
-     )
-
-     parser.add_argument(
-         "--out_dir",
-         type=str,
-         default=".",
-         help=(
-             "Directory to save output files. Will be created if it does not "
-             "exist. Output files are named based on the input filename. "
-             "Default: current directory."
-         ),
-     )
-
-     return parser.parse_args()
-
-
- def main():
-     """Main entry point for energy spectrum computation."""
-     args = parse_args()
-
-     # Setup
-     logger.info("JAX devices: %s", jax.devices())
-
-     # Validate input file
-     if not os.path.exists(args.input_file):
-         logger.error("Input file not found: %s", args.input_file)
-         raise FileNotFoundError(f"Input file not found: {args.input_file}")
-
-     if not args.input_file.endswith(".npy"):
-         logger.warning(
-             "Input file does not have .npy extension: %s", args.input_file
-         )
-
-     # Create output directory
-     os.makedirs(args.out_dir, exist_ok=True)
-
-     # Generate output filenames from input filename
-     input_basename = os.path.splitext(os.path.basename(args.input_file))[0]
-     data_filename = f"{input_basename}_spectrum_data.npz"
-     plot_filename = f"{input_basename}_spectrum.png"
-     data_path = os.path.join(args.out_dir, data_filename)
-     plot_path = os.path.join(args.out_dir, plot_filename)
-
-     # Load data
-     logger.info("Loading data from: %s", args.input_file)
-     field = np.load(args.input_file)
-     logger.info("Loaded field with shape: %s", field.shape)
-
-     # Validate shape
-     if field.ndim != 4:
-         logger.error(
-             "Expected 4D array (batch, time, X, Y), got %dD array", field.ndim
-         )
-         raise ValueError(
-             f"Expected 4D array (batch, time, X, Y), got {field.ndim}D array"
-         )
-
-     batch_size, time_steps, height, width = field.shape
-     if height != width:
-         logger.error(
-             "Expected square spatial grid (X == Y), got %d x %d", height, width
-         )
-         raise ValueError(
-             f"Expected square spatial grid (X == Y), got {height} x {width}"
-         )
-
-     resolution = height
-     k_max = resolution // 3
-     logger.info(
-         "Processing %d trajectories with %d timesteps at %dx%d resolution",
-         batch_size,
-         time_steps,
-         resolution,
-         resolution,
-     )
-     logger.info("Maximum wavenumber (k_max): %d", k_max)
-
-     # Compute energy spectrum
-     logger.info("Computing energy spectra...")
-     spectra_list = []
-
-     for i in tqdm(range(batch_size), desc="Computing spectra"):
-         if resolution > 1024:
-             # Use memory-efficient lax.map for large resolutions
-             single_spectrum = energy_spectrum_from_vorticity_lax_map(
-                 field[i], k_max
-             )
-         else:
-             # Use vmap for moderate resolutions
-             single_spectrum = energy_spectrum_from_vorticity(field[i], k_max)
-         spectra_list.append(single_spectrum)
-
-     # Stack all spectra
-     all_spectra = jnp.stack(spectra_list)
-     logger.info("All spectra shape: %s", all_spectra.shape)
-
-     # Compute mean spectrum (over batch and time)
-     mean_spectrum = all_spectra.reshape(-1, all_spectra.shape[-1]).mean(axis=0)
-     logger.info("Mean spectrum shape: %s", mean_spectrum.shape)
-
-     # Save spectrum data
-     logger.info("Saving spectrum data to: %s", data_path)
-     np.savez_compressed(
-         data_path,
-         mean_spectrum=np.array(mean_spectrum),
-         all_spectra=np.array(all_spectra),
-         k_values=np.arange(len(mean_spectrum)),
-         resolution=resolution,
-         batch_size=batch_size,
-         time_steps=time_steps,
-     )
-
-     # Generate plot
-     logger.info("Generating energy spectrum plot...")
-     plt.figure(figsize=(10, 6))
-
-     # Plot mean spectrum (skip k=0)
-     offset = 1
-     spectrum = mean_spectrum[offset:]
-     k_values = np.arange(offset, len(mean_spectrum))
-     plt.loglog(k_values, spectrum, "b-", linewidth=2, label="Mean spectrum")
-
-     # Add k^{-5/3} reference line (Kolmogorov scaling for 3D turbulence)
-     # and k^{-3} reference line (enstrophy cascade in 2D turbulence)
-     k_match = min(10, len(spectrum) // 3)
-     if k_match > 0:
-         ref_value = float(spectrum[k_match - 1])
-
-         # k^{-3} line (2D enstrophy cascade)
-         scaling_k3 = ref_value * (k_match**3)
-         k_theory = np.logspace(0, np.log10(len(mean_spectrum)), 100)
-         power_law_k3 = scaling_k3 * k_theory ** (-3)
-         plt.loglog(
-             k_theory,
-             power_law_k3,
-             "k--",
-             alpha=0.7,
-             linewidth=1.5,
-             label=r"$k^{-3}$ (enstrophy cascade)",
-         )
-
-         # k^{-5/3} line (inverse energy cascade)
-         scaling_k53 = ref_value * (k_match ** (5 / 3))
-         power_law_k53 = scaling_k53 * k_theory ** (-5 / 3)
-         plt.loglog(
-             k_theory,
-             power_law_k53,
-             "r--",
-             alpha=0.7,
-             linewidth=1.5,
-             label=r"$k^{-5/3}$ (energy cascade)",
-         )
-
-     plt.xlabel("Wavenumber k", fontsize=12)
-     plt.ylabel("Energy Spectrum E(k)", fontsize=12)
-     plt.title(f"Energy Spectrum ({resolution}x{resolution} resolution)", fontsize=14)
-     plt.legend()
-     plt.grid(True, alpha=0.3)
-     xlim = plt.xlim()
-     plt.xlim(1, xlim[1])
-     plt.tight_layout()
-
-     # Save plot
-     plt.savefig(plot_path, dpi=300, bbox_inches="tight")
-     plt.close()
-     logger.info("Plot saved to: %s", plot_path)
-
-     logger.info("Done!")
-
-
- if __name__ == "__main__":
-     main()
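
The deleted script's core computation is: solve for the streamfunction in Fourier space (psi_hat = -omega_hat / |k|^2), differentiate spectrally to get the velocity (u_x = -dpsi/dy, u_y = dpsi/dx), then bin kinetic energy into integer wavenumber shells, E(k) = 0.5 * sum over floor(|k'|) = k of (|u_x_hat(k')|^2 + |u_y_hat(k')|^2). As a minimal NumPy-only sanity check of the same pipeline, the sketch below handles a single square snapshot and assumes a 2*pi-periodic domain (so integer mode numbers double as physical wavenumbers); the function name is illustrative and not part of the repo.

    import numpy as np

    def spectrum_numpy(vort):
        """E(k) for one (N, N) vorticity snapshot (illustrative sketch)."""
        N = vort.shape[0]
        m = np.fft.fftfreq(N) * N                 # integer mode numbers
        KX, KY = np.meshgrid(m, m, indexing="ij")
        K2 = KX**2 + KY**2
        K2[0, 0] = 1.0                            # avoid division by zero at k=0
        psi_hat = -np.fft.fft2(vort) / K2         # spectral Poisson solve
        psi_hat[0, 0] = 0.0                       # zero-mean streamfunction
        ux_hat = -1j * KY * psi_hat / N**2        # u_x = -dpsi/dy (FFT-normalized)
        uy_hat = 1j * KX * psi_hat / N**2         # u_y =  dpsi/dx
        shell = np.hypot(KX, KY).astype(int)      # bin wavevectors by |k|
        k_max = N // 3                            # 2/3-rule dealiasing cutoff
        energy = 0.5 * (np.abs(ux_hat) ** 2 + np.abs(uy_hat) ** 2)
        E = np.zeros(k_max + 1)
        keep = shell <= k_max
        np.add.at(E, shell[keep], energy[keep])   # accumulate energy per shell
        return E

The JAX version above is equivalent but jit-compiled and vmapped over the time axis, switching to jax.lax.map above 1024x1024 resolution to keep peak memory bounded.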
 
huggingface_pdeinv_download.py DELETED
@@ -1,60 +0,0 @@
- import argparse
- from huggingface_hub import snapshot_download
-
- datasets = [
-     "darcy-flow-241",
-     "darcy-flow-421",
-     "korteweg-de-vries-1d",
-     "navier-stokes-forced-2d-2048",
-     "navier-stokes-forced-2d",
-     "navier-stokes-unforced-2d",
-     "reaction-diffusion-2d-du-512",
-     "reaction-diffusion-2d-du",
-     "reaction-diffusion-2d-k-512",
-     "reaction-diffusion-2d-k",
- ]
-
- splits = [
-     "*",
-     "train",
-     "validation",
-     "test",
-     "out_of_distribution",
-     "out_of_distribution_extreme",
- ]
-
-
- def main():
-     parser = argparse.ArgumentParser(
-         description="Download PDE Inverse Problem Benchmarking datasets"
-     )
-     parser.add_argument(
-         "--dataset",
-         type=str,
-         default="darcy-flow-241",
-         choices=datasets,
-         help="Dataset to download",
-     )
-     parser.add_argument(
-         "--split", type=str, default="*", choices=splits, help="Data split to download"
-     )
-     parser.add_argument(
-         "--local-dir", type=str, default="", help="Local directory to save data"
-     )
-
-     args = parser.parse_args()
-
-     data_bucket = "DabbyOWL/PDE_Inverse_Problem_Benchmarking"
-
-     print(f"Downloading {args.dataset}/{args.split} to {args.local_dir}")
-
-     snapshot_download(
-         data_bucket,
-         allow_patterns=[f"{args.dataset}/{args.split}/*"],
-         local_dir=args.local_dir,
-         repo_type="dataset",
-     )
-
-
- if __name__ == "__main__":
-     main()
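
The deleted downloader is a thin wrapper around huggingface_hub.snapshot_download that filters files by a "<dataset>/<split>/*" pattern. A direct equivalent of one typical invocation would be the sketch below (the chosen split and local path are illustrative):

    from huggingface_hub import snapshot_download

    # Fetch only the training split of darcy-flow-241; pass "*" as the
    # split to mirror an entire dataset, as the script's default did.
    snapshot_download(
        "DabbyOWL/PDE_Inverse_Problem_Benchmarking",
        allow_patterns=["darcy-flow-241/train/*"],
        local_dir="data",  # illustrative destination
        repo_type="dataset",
    )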
 
images/1dkdv.png DELETED

Git LFS Details

  • SHA256: 80bc2c529faa8b4aff5ff2a5e7ad3543c66da8694e173aa24e598dddebcdb801
  • Pointer size: 130 Bytes
  • Size of remote file: 24.7 kB
images/2d_navier_stokes_unforced_train_val_split.png DELETED

Git LFS Details

  • SHA256: 1fca27e1e0f475f207ecd1581d8dfe309148373cc4256006b6eb6e1909599b8f
  • Pointer size: 130 Bytes
  • Size of remote file: 25.6 kB
images/2ddf.png DELETED

Git LFS Details

  • SHA256: 1e36b7d1a0edff49332fe1a724c7fc247d2a0c93a9dd94e5eb23f20b05dde710
  • Pointer size: 130 Bytes
  • Size of remote file: 70.4 kB