models.py
import torch
import torch.nn as nn


class SimpleMLP(nn.Module):
    """A three-layer MLP for binary classification (sigmoid output in (0, 1))."""

    def __init__(self, dim_in, dim_inner=128, dropout_rate=0.9):
        super().__init__()
        self.fc1 = nn.Linear(dim_in, dim_inner)
        self.fc2 = nn.Linear(dim_inner, dim_inner)
        self.fc3 = nn.Linear(dim_inner, 1)
        # Activations are kept as separate module attributes for SHAP
        # explainability: https://github.com/slundberg/shap/issues/1678
        self.activation_h1 = nn.Tanh()
        self.activation_h2 = nn.Tanh()
        self.activation_final = nn.Sigmoid()
        self.dropout_rate: float = dropout_rate
        self.dropout_layer = nn.Dropout(p=self.dropout_rate)

    def forward(self, x):
        # Two tanh hidden layers, each followed by dropout, then a sigmoid head.
        x = self.activation_h1(self.fc1(x))
        x = self.dropout_layer(x)
        x = self.activation_h2(self.fc2(x))
        x = self.dropout_layer(x)
        x = self.fc3(x)
        return self.activation_final(x)
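A minimal usage sketch, not part of the original file: it shows a forward pass and the shap.DeepExplainer pairing that the separated activation modules are intended for. The input dimensionality (dim_in=10), the random background/test tensors, and the `from models import SimpleMLP` path are illustrative assumptions.

import shap  # assumes the shap package is installed
import torch

from models import SimpleMLP  # hypothetical import path for this file

model = SimpleMLP(dim_in=10)       # dim_in=10 is an illustrative choice
model.eval()                       # disable dropout for deterministic output

x = torch.randn(4, 10)             # batch of 4 random feature vectors
probs = model(x)                   # shape (4, 1), values in (0, 1)

# Separate activation modules let DeepExplainer hook each layer cleanly,
# per the shap issue linked in the class.
background = torch.randn(100, 10)  # illustrative background sample
explainer = shap.DeepExplainer(model, background)
shap_values = explainer.shap_values(x)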