update module scripts

sarahkpardo committed Sep 14, 2022
1 parent deca6d6 commit 460a2f1
Showing 7 changed files with 297 additions and 56 deletions.
15 changes: 15 additions & 0 deletions ndmd-env.yml
@@ -0,0 +1,15 @@
name: ndmd-env
channels:
- defaults
dependencies:
- pytorch
- torchvision
- torchaudio
- notebook
- nb_conda_kernels
- pip
- einops
- matplotlib
- scipy
- h5py
- tqdm
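Assuming a standard conda installation, the environment above can be created with "conda env create -f ndmd-env.yml" and activated with "conda activate ndmd-env".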
6 changes: 6 additions & 0 deletions neural_operators/__init__.py
@@ -0,0 +1,6 @@
from neural_operators import data
from neural_operators import kernel
from neural_operators import loss
from neural_operators import math_utils
from neural_operators import model
from neural_operators import train
244 changes: 243 additions & 1 deletion neural_operators/data.py
@@ -1,12 +1,253 @@
from pathlib import Path
from timeit import default_timer

import h5py
from einops import rearrange, reduce, repeat
import numpy as np
import scipy.io
from scipy.io import loadmat
import torch


class MatReader(object):
def __init__(self, file_path, to_torch=True, to_cuda=False, to_float=True):
super(MatReader, self).__init__()

self.to_torch = to_torch
self.to_cuda = to_cuda
self.to_float = to_float

self.file_path = file_path

self.data = None
self.old_mat = None
self._load_file()

    def _load_file(self):
        try:
            self.data = scipy.io.loadmat(self.file_path)
            self.old_mat = True
        except NotImplementedError:
            # loadmat rejects MATLAB v7.3 files, which are HDF5-based;
            # fall back to h5py for those.
            self.data = h5py.File(self.file_path, 'r')
            self.old_mat = False

def load_file(self, file_path):
self.file_path = file_path
self._load_file()

def read_field(self, field):
x = self.data[field]

if not self.old_mat:
x = x[()]
x = np.transpose(x, axes=range(len(x.shape) - 1, -1, -1))

if self.to_float:
x = x.astype(np.float32)

if self.to_torch:
x = torch.from_numpy(x)

if self.to_cuda:
x = x.cuda()

return x

def set_cuda(self, to_cuda):
self.to_cuda = to_cuda

def set_torch(self, to_torch):
self.to_torch = to_torch

def set_float(self, to_float):
self.to_float = to_float
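

# --- Editor's usage sketch; illustrative, not part of the committed file ---
# MatReader hides the v7 (scipy) / v7.3 (h5py) format split behind a single
# read_field() call; the file and field names below are hypothetical.
def _matreader_example():
    reader = MatReader('burgers_data.mat')
    a = reader.read_field('a')  # float32 torch.Tensor by default
    u = reader.read_field('u')
    return a.shape, u.shape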


def mat_to_tensor1d(TRAIN_PATH,
TEST_PATH,
ss_rate,
x_field, y_field,
vsamples=None,
normalize=False):
"""Converts .mat file contents to torch tensors.
Args:
TRAIN_PATH: list of .mat file names to concatenate into training
tensors
TEST_PATH: list of .mat file names to concatenate into test tensors
ss_rate: [train subsampling rate, test rate]
x_field, y_field: names of fields in the .mat file
vsamples: [ntest, ntrain]
print: Print log contents to the console; default = True
normalize: Apply normalization to the tensors; default = False
"""
t1 = default_timer()

reader = MatReader(TRAIN_PATH)
x_data = reader.read_field(x_field)
y_data = reader.read_field(y_field)
dimension = len(x_data.shape) - 1

mat_info = ("input signal vector samples: {}\n"
"output signal vector samples: {}\n"
"input signal entry samples: {}\n"
"output signal entry samples: {}\n"
"signal dimension: {}\n\n"
).format(x_data.shape[0],
y_data.shape[0],
x_data.shape[1],
y_data.shape[1],
dimension)

ntrain = vsamples[0]
ntest = vsamples[1]

full_res = x_data.shape[1]
tr_ss = ss_rate[0]
tst_ss = ss_rate[1]
tr_esamples = int(((full_res - 1) / tr_ss) + 1)

    # use the first ntrain samples for training
    x_train = x_data[:ntrain, ::tr_ss][:, :tr_esamples]
    y_train = y_data[:ntrain, ::tr_ss][:, :tr_esamples]

if TRAIN_PATH != TEST_PATH:
# using separate test/train datasets
test_reader = MatReader(TEST_PATH)
x_test = test_reader.read_field(x_field)
y_test = test_reader.read_field(y_field)

full_res = x_test.shape[1]
tst_esamples = int(((full_res - 1) / tst_ss) + 1)

        # use the first ntest samples of the separate test file
        x_test = x_test[:ntest, ::tst_ss][:, :tst_esamples]
        y_test = y_test[:ntest, ::tst_ss][:, :tst_esamples]

else:
full_res = x_data.shape[1]
tst_esamples = int(((full_res - 1) / tst_ss) + 1)

# same dataset; use last (ntest) samples
x_test = x_data[-ntest:, ::tst_ss][:, :tst_esamples]
y_test = y_data[-ntest:, ::tst_ss][:, :tst_esamples]

ds_info = ("training dataset: {}\n"
"test dataset: {}\n\n"
"input train samples: {}\n"
"output train samples: {}\n"
"input train resolution: {}\n"
"output train resolution: {}\n\n"
"input test samples: {}\n"
"output test samples: {}\n"
"input test resolution: {}\n"
"output test resolution: {}\n\n"
).format(TRAIN_PATH,
TEST_PATH,
x_train.shape[0],
y_train.shape[0],
x_train.shape[1],
y_train.shape[1],
x_test.shape[0],
y_test.shape[0],
x_test.shape[1],
y_test.shape[1])

t2 = default_timer()

return x_train, y_train, x_test, y_test, mat_info, ds_info
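

# --- Editor's usage sketch; illustrative, not part of the committed file ---
# File and field names are hypothetical; ss_rate keeps every 8th spatial
# sample and vsamples is [ntrain, ntest].
def _mat_to_tensor1d_example():
    return mat_to_tensor1d('burgers_train.mat',
                           'burgers_test.mat',
                           ss_rate=[8, 8],
                           x_field='a',
                           y_field='u',
                           vsamples=[1000, 100])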


def mat_to_tensor2d(TRAIN_PATH,
TEST_PATH,
ss_rate,
x_field, y_field,
vsamples=None,
normalize=False):
"""Converts .mat file contents to torch tensors.
Args:
TRAIN_PATH: list of .mat file names to concatenate into training
tensors
TEST_PATH: list of .mat file names to concatenate into test tensors
ss_rate: [train subsampling rate, test rate]
x_field, y_field: names of fields in the .mat file
vsamples: [ntest, ntrain]
print: Print log contents to the console; default = True
normalize: Apply normalization to the tensors; default = False
"""
t1 = default_timer()

reader = MatReader(TRAIN_PATH)
x_data = reader.read_field(x_field)
y_data = reader.read_field(y_field)
dimension = len(x_data.shape) - 1

mat_info = ("input signal vector samples: {}\n"
"output signal vector samples: {}\n"
"input signal entry samples: {}\n"
"output signal entry samples: {}\n"
"signal dimension: {}\n\n"
).format(x_data.shape[0],
y_data.shape[0],
x_data.shape[1],
y_data.shape[1],
dimension)

ntrain = vsamples[0]
ntest = vsamples[1]

full_res = x_data.shape[1]
tr_ss = ss_rate[0]
tst_ss = ss_rate[1]
tr_esamples = int(((full_res - 1) / tr_ss) + 1)

x_train = x_data[:ntrain, ::tr_ss, ::tr_ss][:, :tr_esamples, :tr_esamples]
y_train = y_data[:ntrain, ::tr_ss, ::tr_ss][:, :tr_esamples, :tr_esamples]

if TRAIN_PATH != TEST_PATH:
# using separate test/train datasets
test_reader = MatReader(TEST_PATH)
x_test = test_reader.read_field(x_field)
y_test = test_reader.read_field(y_field)

full_res = x_test.shape[1]
tst_esamples = int(((full_res - 1) / tst_ss) + 1)

x_test = x_test[:ntest, ::tst_ss, ::tst_ss][:, :tst_esamples, :tst_esamples]
y_test = y_test[:ntest, ::tst_ss, ::tst_ss][:, :tst_esamples, :tst_esamples]

else:
full_res = x_data.shape[1]
tst_esamples = int(((full_res - 1) / tst_ss) + 1)

# same dataset; use last (ntest) samples
x_test = x_data[-ntest:, ::tst_ss, ::tst_ss][:, :tst_esamples, :tst_esamples]
y_test = y_data[-ntest:, ::tst_ss, ::tst_ss][:, :tst_esamples, :tst_esamples]

ds_info = ("training dataset: {}\n"
"test dataset: {}\n\n"
"input train samples: {}\n"
"output train samples: {}\n"
"input train resolution: {}\n"
"output train resolution: {}\n\n"
"input test samples: {}\n"
"output test samples: {}\n"
"input test resolution: {}\n"
"output test resolution: {}\n\n"
).format(TRAIN_PATH,
TEST_PATH,
x_train.shape[0],
y_train.shape[0],
x_train.shape[1],
y_train.shape[1],
x_test.shape[0],
y_test.shape[0],
x_test.shape[1],
y_test.shape[1])

t2 = default_timer()

return x_train, y_train, x_test, y_test, mat_info, ds_info
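

# Editor's note on the subsampling arithmetic above (hypothetical numbers):
# a field stored at full resolution 8193, subsampled at rate 8, keeps
# int((8193 - 1) / 8) + 1 = 1025 points per dimension, so x_train here
# would have shape (ntrain, 1025, 1025).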


def read_data(
path,
property_names,
@@ -271,3 +512,4 @@ def gen_dataset(
name='test_data')

return train_DL, test_DL

53 changes: 29 additions & 24 deletions neural_operators/kernel.py
@@ -5,18 +5,24 @@
import torch.nn as nn
import torch.nn.functional as F

-from math_utils import complex_mul1d, complex_mul2d
+from neural_operators.math_utils import complex_mul1d, complex_mul2d


class SpectralConv1d(nn.Module):
-    def __init__(self, in_channels, out_channels, modes, trainable=True):
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 modes,
+                 trainable=True):
super(SpectralConv1d, self).__init__()

"""Applies FFT, linear transform, and inverse FFT.
Args:
in_channels: number of input channels
out_channels: number of output channels
-        modes: number of Fourier modes to multiply (at most floor(N/2) + 1)
-        train:
+        modes: number of Fourier modes to keep (at most floor(N/2) + 1)
+        trainable: if True, register the weights as trainable parameters
"""

self.in_channels = in_channels
@@ -25,31 +25,25 @@ def __init__(self, in_channels, out_channels, modes, trainable=True):
self.trainable = trainable
self.scale = (1 / (in_channels * out_channels))

-        self.weights = nn.Parameter(
-            self.scale * torch.rand(in_channels, out_channels, self.modes, 2))
+        self.weights = nn.Parameter(self.scale *
+                                    torch.rand(self.in_channels,
+                                               self.out_channels,
+                                               self.modes,
+                                               dtype=torch.cfloat))
self.weights.requires_grad = trainable

def forward(self, x):
batchsize = x.shape[0]

        # Compute Fourier coefficients (up to a constant normalization factor)
-        x_ft = torch.rfft(x, 1, normalized=True, onesided=True)
+        x_ft = torch.fft.rfft(x)

# Multiply relevant Fourier modes
-        out_ft = torch.zeros(batchsize,
-                             self.in_channels,
-                             x.size(-1) // 2 + 1,
-                             2,
-                             device=x.device)
-        out_ft[:, :, :self.modes] = complex_mul1d(x_ft[:, :, :self.modes],
-                                                  self.weights)
+        out_ft = complex_mul1d(x_ft[:, :, :self.modes],
+                               self.weights)

# Return to physical space
-        x = torch.irfft(out_ft,
-                        1,
-                        normalized=True,
-                        onesided=True,
-                        signal_sizes=(x.size(-1), ))
+        # irfft zero-pads the truncated modes back up to n output points
+        x = torch.fft.irfft(out_ft, n=x.size(-1))

return x
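

# --- Editor's smoke test; illustrative, not part of the committed file ---
# Assumes complex_mul1d accepts the cfloat weights defined above; with
# in_channels == out_channels the layer preserves the input shape.
def _spectral_conv1d_example():
    layer = SpectralConv1d(in_channels=3, out_channels=3, modes=16)
    x = torch.randn(8, 3, 256)  # (batch, channels, grid points)
    y = layer(x)
    assert y.shape == x.shape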

@@ -198,7 +198,10 @@ def forward(self, x):
batchsize = x.shape[0]

        # Compute Fourier coefficients (up to a constant normalization factor)
-        x_ft = torch.rfft(x, 2, normalized=True, onesided=True)
+        # rfft2 transforms the last two dims; a bare rfft(x, 2) would
+        # instead truncate the signal to length 2.
+        x_ft = torch.fft.rfft2(x, norm="forward")

# Apply transform consisting of truncated Fourier modes
out_ft = torch.zeros(batchsize,
@@ -217,11 +220,13 @@
self.weights2)

# Return to physical space
-        x = torch.irfft(out_ft,
-                        2,
-                        normalized=True,
-                        onesided=True,
-                        signal_sizes=(x.size(-2), x.size(-1)))
+        x = torch.fft.irfft2(out_ft,
+                             s=(x.size(-2), x.size(-1)),
+                             norm="forward")

return x

1 change: 0 additions & 1 deletion neural_operators/loss.py
@@ -1,4 +1,3 @@
-import numpy as np
import torch

