Ultralytics Refactor https://ultralytics.com/actions (#24)
Co-authored-by: UltralyticsAssistant <[email protected]>
glenn-jocher and UltralyticsAssistant committed Jun 20, 2024
1 parent 22a3cf3 commit 6267179
Showing 6 changed files with 59 additions and 9 deletions.
14 changes: 7 additions & 7 deletions README.md
@@ -1,5 +1,5 @@
<br>
<img src="https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Original.svg" width="320">
<a href="https://ultralytics.com" target="_blank"><img src="https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Original.svg" width="320" alt="Ultralytics logo"></a>

# 🌊 Introduction

@@ -25,11 +25,11 @@ The primary goal of this project is to develop and share Machine Learning techni
Before you dive into waveform vector exploitation with our WAVE code, make sure your machine is set up with the following:

- Python 3.7 or later, plus these packages installed with `pip3 install -U -r requirements.txt`:
- `numpy`
- `scipy`
- `torch` (version 0.4.0 or later)
- `tensorflow` (version 1.8.0 or later)
- `plotly` (optional, for visualization)
- `numpy`
- `scipy`
- `torch` (version 0.4.0 or later)
- `tensorflow` (version 1.8.0 or later)
- `plotly` (optional, for visualization)
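For a quick check against the list above, a minimal sketch (not part of this commit) that reports which of the listed packages import and which versions are installed:

```python
# Environment sanity check; illustrative only, not part of this commit.
# Reports whether each package listed above is importable and prints its version.
import importlib

requirements = {"numpy": None, "scipy": None, "torch": "0.4.0", "tensorflow": "1.8.0", "plotly": None}

for name, min_version in requirements.items():
    try:
        module = importlib.import_module(name)
        version = getattr(module, "__version__", "unknown")
        note = f" (requires >= {min_version})" if min_version else ""
        print(f"{name} {version} OK{note}")
    except ImportError:
        print(f"{name} is missing -- install it with: pip3 install -U -r requirements.txt")
```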

# 🏃 Run Instructions

@@ -78,7 +78,7 @@ For bug reports, feature requests, and contributions, head to [GitHub Issues](ht
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="Ultralytics TikTok"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://www.instagram.com/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="3%" alt="Ultralytics Instagram"></a>
<a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="3%" alt="Ultralytics Instagram"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
</div>
19 changes: 18 additions & 1 deletion gcp/wave_pytorch_gcp.py
@@ -17,6 +17,9 @@


def runexample(H, model, str, lr=0.001, amsgrad=False):
"""Train a model on waveform data with specified hyperparameters, validate performance, and return best epoch
results.
"""
epochs = 100000
validations = 5000
printInterval = 1000
@@ -115,26 +118,35 @@ def runexample(H, model, str, lr=0.001, amsgrad=False):

class LinearAct(torch.nn.Module):
def __init__(self, nx, ny):
"""Initializes the LinearAct module with input and output dimensions and defines a linear transformation
followed by a Tanh activation.
"""
super(LinearAct, self).__init__()
self.Linear1 = torch.nn.Linear(nx, ny)
self.act = torch.nn.Tanh()

def forward(self, x):
"""Applies a linear transformation followed by Tanh activation to the input tensor."""
return self.act(self.Linear1(x))


class WAVE(torch.nn.Module):
def __init__(self, n): # n = [512, 108, 23, 5, 1]
"""Initializes the WAVE model with specified linear layers and activation functions."""
super(WAVE, self).__init__()
self.fc0 = LinearAct(n[0], n[1])
self.fc1 = LinearAct(n[1], n[2])
self.fc2 = torch.nn.Linear(n[2], n[3])

def forward(self, x):
"""Computes the forward pass of the WAVE model through its linear and activation layers."""
return self.fc2(self.fc1(self.fc0(x)))


def tsact(): # TS activation function
"""Implements a TS activation function using WAVE model with Sigmoid activation and saves the result in
'TS.sigmoid.mat'.
"""
H = [512, 64, 8, 1]
tsv = ["Sigmoid"] # ['Tanh', 'LogSigmoid', 'Softsign', 'ELU']
# tsv = np.logspace(-4,-2,11)
@@ -167,6 +179,7 @@ def forward(self, x):


def tsnoact(): # TS activation function
"""Generates and saves a TS dataset using a neural network model without any activation functions."""
H = [512, 64, 8, 1]
tsv = ["NoAct"] # ['Tanh', 'LogSigmoid', 'Softsign', 'ELU']
# tsv = np.logspace(-4,-2,11)
@@ -190,6 +203,9 @@ def forward(self, x):


def tslr(): # TS learning rate
"""Generate and save learning rate (LR) logs for time-series models with varying LRs using WAVE and TanH
activation.
"""
tsv = np.logspace(-5, -2, 13)
tsy = []
for a in tsv:
@@ -198,6 +214,7 @@ def tslr(): # TS learning rate


def tsams(): # TS AMSgrad
"""Trains models using AMSgrad with Tanh activation and saves the results to a .mat file."""
tsv = [False, True]
tsy = []
for a in tsv:
@@ -206,7 +223,7 @@ def tsams(): # TS AMSgrad


def tsshape(): # TS network shape
# H = [32] # 512 inputs, 2 outputs structures:
"""Determines the shape of the TS network and saves the results to a .mat file."""
# H = [81, 13]
# H = [128, 32, 8]
# H = [169, 56, 18, 6]
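For context on the docstrings added above, a minimal, self-contained sketch of the LinearAct/WAVE stack from gcp/wave_pytorch_gcp.py (not part of this commit; the layer widths and batch size are illustrative, matching the H = [512, 64, 8, 1] used in tsact()):

```python
# Illustrative sketch of the fully connected WAVE MLP defined in gcp/wave_pytorch_gcp.py.
import torch
import torch.nn as nn


class LinearAct(nn.Module):
    def __init__(self, nx, ny):
        super(LinearAct, self).__init__()
        self.Linear1 = nn.Linear(nx, ny)  # linear transformation
        self.act = nn.Tanh()              # Tanh activation

    def forward(self, x):
        return self.act(self.Linear1(x))


class WAVE(nn.Module):
    def __init__(self, n):  # e.g. n = [512, 64, 8, 1]
        super(WAVE, self).__init__()
        self.fc0 = LinearAct(n[0], n[1])
        self.fc1 = LinearAct(n[1], n[2])
        self.fc2 = nn.Linear(n[2], n[3])

    def forward(self, x):
        return self.fc2(self.fc1(self.fc0(x)))


x = torch.randn(16, 512)       # a batch of 16 waveform vectors
model = WAVE([512, 64, 8, 1])  # widths match the H used in tsact()
y = model(x)                   # -> shape [16, 1]
print(y.shape)
```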
15 changes: 15 additions & 0 deletions train.py
@@ -18,6 +18,7 @@


def train(H, model, str, lr=0.001):
"""Trains a given model on provided data with specified hyperparameters and saves training results."""
data = "wavedata25ns.mat"

cuda = torch.cuda.is_available()
@@ -122,12 +123,14 @@ def train(H, model, str, lr=0.001):
# 400 5.1498e-05 0.023752 12.484 0.15728 # var 0
class WAVE(torch.nn.Module):
def __init__(self, n=(512, 64, 8, 2)):
"""Initializes the WAVE model architecture with specified layer sizes."""
super(WAVE, self).__init__()
self.fc0 = nn.Linear(n[0], n[1])
self.fc1 = nn.Linear(n[1], n[2])
self.fc2 = nn.Linear(n[2], n[3])

def forward(self, x): # x.shape = [bs, 512]
"""Performs a forward pass through the WAVE model transforming input x from shape [bs, 512] to [bs, 2]."""
x = torch.tanh(self.fc0(x)) # [bs, 64]
x = torch.tanh(self.fc1(x)) # [bs, 8]
return self.fc2(x) # [bs, 2]
@@ -137,6 +140,7 @@ def forward(self, x): # x.shape = [bs, 512]
# 121 0.47059 0.0306 14.184 0.1608
class WAVE4(nn.Module):
def __init__(self, n_out=2):
"""Initializes the WAVE4 model with specified output layers and configurations for convolutional layers."""
super(WAVE4, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=(1, 9), stride=(1, 2), padding=(0, 4), bias=False),
@@ -153,6 +157,9 @@ def __init__(self, n_out=2):
self.layer3 = nn.Conv2d(64, n_out, kernel_size=(2, 64), stride=(1, 1), padding=(0, 0))

def forward(self, x): # x.shape = [bs, 512]
"""Forward pass for processing input tensor through convolutional layers and reshaping output for
classification.
"""
x = x.view((-1, 2, 256)) # [bs, 2, 256]
x = x.unsqueeze(1) # [bs, 1, 2, 256] = [N, C, H, W]
x = self.layer1(x) # [bs, 32, 1, 128]
@@ -164,6 +171,9 @@ def forward(self, x): # x.shape = [bs, 512]
# 65 4.22e-05 0.021527 11.883 0.14406
class WAVE3(nn.Module):
def __init__(self, n_out=2):
"""Initializes the WAVE3 class with neural network layers for feature extraction and classification in a
sequential manner.
"""
super(WAVE3, self).__init__()
n = 32
self.layer1 = nn.Sequential(
@@ -188,6 +198,9 @@ def __init__(self, n_out=2):
self.layer4 = nn.Conv2d(n * 4, n_out, kernel_size=(1, 32), stride=1, padding=0)

def forward(self, x): # x.shape = [bs, 512]
"""Performs the forward pass for input tensor `x` through the defined neural network layers, reshaping as
necessary.
"""
x = x.view((-1, 2, 256)) # [bs, 2, 256]
x = x.unsqueeze(2) # [bs, 2, 1, 256] = [N, C, H, W]
x = self.layer1(x) # [bs, 32, 1, 128]
@@ -203,6 +216,7 @@ def forward(self, x): # x.shape = [bs, 512]
# 121 2.6941e-05 0.021642 11.923 0.14201 # var 1
class WAVE2(nn.Module):
def __init__(self, n_out=2):
"""Initializes the WAVE2 model architecture components."""
super(WAVE2, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=(2, 30), stride=(1, 2), padding=(1, 15), bias=False),
@@ -219,6 +233,7 @@ def __init__(self, n_out=2):
self.layer3 = nn.Sequential(nn.Conv2d(64, n_out, kernel_size=(2, 64), stride=(1, 1), padding=(0, 0)))

def forward(self, x): # x.shape = [bs, 512]
"""Forward pass for processing input tensor x through sequential layers, reshaping as needed for the model."""
x = x.view((-1, 2, 256)) # [bs, 2, 256]
x = x.unsqueeze(1) # [bs, 1, 2, 256]
x = self.layer1(x) # [bs, 32, 1, 128]
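The new forward-pass docstrings above all mention reshaping the [bs, 512] input; a short sketch (not part of this commit) of the reshaping that the visible forward methods in train.py perform:

```python
# Sketch of the input reshaping used by the WAVE2/WAVE3/WAVE4 forward passes above
# (illustrative only; the convolutional layers themselves are partly elided in this diff).
import torch

x = torch.randn(4, 512)       # [bs, 512] raw waveform features
x2 = x.view((-1, 2, 256))     # [bs, 2, 256] split into two 256-sample channels
x_nchw = x2.unsqueeze(1)      # [bs, 1, 2, 256] = [N, C, H, W] layout used by WAVE2/WAVE4
x_nchw_alt = x2.unsqueeze(2)  # [bs, 2, 1, 256] = [N, C, H, W] layout used by WAVE3
print(x_nchw.shape, x_nchw_alt.shape)
```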
1 change: 1 addition & 0 deletions train_tf.py
@@ -12,6 +12,7 @@


def runexample(H, model, str):
"""Trains a TensorFlow model using provided hyperparameters and data, then evaluates and saves the results."""
lr = 0.002
eps = 0.001
epochs = 50000
2 changes: 2 additions & 0 deletions utils/torch_utils.py
@@ -2,12 +2,14 @@


def init_seeds(seed=0):
"""Initialize random seeds for CPU and GPU operations using a given integer seed value."""
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)


def select_device(force_cpu=False):
"""Select the appropriate device (CPU or CUDA) based on availability and the force_cpu flag."""
cuda = False if force_cpu else torch.cuda.is_available()
device = torch.device("cuda:0" if cuda else "cpu")

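A sketch of how these two helpers are typically combined at the start of a run (not part of this commit; it assumes select_device returns the torch.device it constructs, which the elided body does not show, and that it is run from the repository root):

```python
# Illustrative use of the utils/torch_utils.py helpers documented above.
import torch

from utils.torch_utils import init_seeds, select_device

init_seeds(seed=0)                        # make CPU and GPU RNG reproducible
device = select_device(force_cpu=False)   # assumed to return the torch.device it builds (body partly elided)

model = torch.nn.Linear(512, 2).to(device)  # hypothetical model moved to the selected device
x = torch.randn(8, 512, device=device)
print(model(x).shape)                     # torch.Size([8, 2])
```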
17 changes: 16 additions & 1 deletion utils/utils.py
@@ -10,6 +10,7 @@


def normalize(x, axis=None): # normalize x mean and std by axis
"""Normalize an array 'x' by its mean and standard deviation along the specified axis."""
if axis is None:
mu, sigma = x.mean(), x.std()
elif axis == 0:
@@ -21,12 +22,14 @@ def normalize(x, axis=None): # normalize x mean and std by axis


def shuffledata(x, y): # randomly shuffle x and y by same axis=0 indices
"""Randomly shuffles arrays x and y along the same axis=0 indices."""
i = np.arange(x.shape[0])
np.random.shuffle(i)
return x[i], y[i]


def splitdata(x, y, train=0.7, validate=0.15, test=0.15, shuffle=False): # split training data
"""Splits data arrays x and y into training, validation, and test sets with optional shuffling."""
n = x.shape[0]
if shuffle:
x, y = shuffledata(x, y)
@@ -37,21 +40,25 @@ def splitdata(x, y, train=0.7, validate=0.15, test=0.15, shuffle=False): # spli


def stdpt(r, ys): # MSE loss + standard deviation (pytorch)
"""Calculate Mean Squared Error loss and standard deviation of tensor r, scaled by ys."""
r = r.detach()
loss = (r**2).mean().cpu().item()
std = r.std(0).cpu().numpy() * ys
return loss, std


def stdtf(r, ys): # MSE loss + standard deviation (tf eager)
"""Calculate Mean Squared Error loss and standard deviation of tensor r using TensorFlow, scaled by ys."""
r = r.numpy()
loss = (r**2).mean()
std = r.std(0) * ys
return loss, std


def model_info(model):
# Plots a line-by-line description of a PyTorch model
"""Print a detailed line-by-line summary of a PyTorch model including layer name, gradient status, parameters,
shape, mean, and std.
"""
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
print("\n%5s %40s %9s %12s %20s %10s %10s" % ("layer", "name", "gradient", "parameters", "shape", "mu", "sigma"))
@@ -66,6 +73,7 @@ def model_info(model):

class patienceStopper(object):
def __init__(self, patience=10, verbose=True, epochs=1000, printerval=10):
"""Initialize a patience stopper with given parameters for early stopping in training."""
self.patience = patience
self.verbose = verbose
self.bestepoch = 0
@@ -78,11 +86,15 @@ def __init__(self, patience=10, verbose=True, epochs=1000, printerval=10):
self.printerval = printerval

def reset(self):
"""Resets tracking metrics to initial states for the training process."""
self.bestloss = float("inf")
self.bestmetrics = None
self.num_bad_epochs = 0

def step(self, loss, metrics=None, model=None):
"""Updates internal state for each training epoch, tracking loss and metrics, and printing progress
periodically.
"""
loss = loss.item()
self.num_bad_epochs += 1
self.epoch += 1
@@ -110,10 +122,12 @@ def step(self, loss, metrics=None, model=None):
return False

def first(self, model):
"""Prints a formatted header for model training details including epoch, time, loss, and metrics."""
s = ("epoch", "time", "loss", "metric(s)")
print("%12s" * len(s) % s)

def printepoch(self, epoch, loss, metrics):
"""Prints the epoch number, elapsed time, loss, and optional metrics for model training."""
s = (epoch, time.time() - self.t, loss)
if metrics is not None:
for i in range(len(metrics)):
@@ -122,6 +136,7 @@ def printepoch(self, epoch, loss, metrics):
self.t = time.time()

def final(self, msg):
"""Print final results and completion message with total elapsed time and best training epoch details."""
dt = time.time() - self.t0
print(
"%s\nFinished %g epochs in %.3fs (%.3f epochs/s). Best results:"
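A short usage sketch for the fully visible helpers above, shuffledata and stdpt (not part of this commit; input values are illustrative and the import assumes the repository root is on the Python path):

```python
# shuffledata keeps x/y rows paired while shuffling; stdpt turns a residual tensor
# into an MSE loss plus a standard deviation rescaled by ys. Illustrative values only.
import numpy as np
import torch

from utils.utils import shuffledata, stdpt

x = np.arange(10).reshape(5, 2)   # 5 samples, 2 features
y = np.arange(5)                  # 5 targets
x_shuf, y_shuf = shuffledata(x, y)  # rows of x and y permuted with the same indices

r = torch.randn(100, 2)           # residuals (prediction - target) in normalized units
loss, std = stdpt(r, ys=np.array([1.5, 0.4]))  # std is scaled back to original units
print(loss, std)
```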
