Module minder_utils.models.feature_extractors.partial_order.basic
Expand source code
import torch.nn as nn
import torch.nn.functional as F


class Encoder(nn.Module):
    def __init__(self, base_model, input_dim, latent_dim):
        super(Encoder, self).__init__()
        # Encoder
        if base_model == 'conv':
            # Convolutional branch: two padded and two unpadded kernel-size-2
            # convolutions, so the spatial size of the input is preserved.
            self.encoder = nn.Sequential(
                nn.Conv2d(3, 8, kernel_size=2, padding=1),
                nn.Tanh(),
                nn.Conv2d(8, 16, kernel_size=2, padding=1),
                nn.Tanh(),
                nn.Conv2d(16, 8, kernel_size=2),
                nn.Tanh(),
                nn.Conv2d(8, 3, kernel_size=2),
                nn.Tanh(),
                nn.Flatten(),
                nn.Linear(input_dim, latent_dim)
            )
        else:
            # Fully connected branch: flatten, then 256 -> 128 -> 64 -> latent_dim.
            self.encoder = nn.Sequential(
                nn.Flatten(),
                nn.Linear(input_dim, 256),
                nn.Tanh(),
                nn.Linear(256, 128),
                nn.Tanh(),
                nn.Linear(128, 64),
                nn.Tanh(),
                nn.Linear(64, latent_dim),
                nn.Tanh()
            )

    def forward(self, inputs):
        codes = self.encoder(inputs)
        return codes


class Partial_Order_Models(nn.Module):
    def __init__(self, base_model, input_dim, out_dim, latent_dim, **kwargs):
        super(Partial_Order_Models, self).__init__()
        self.features = Encoder(base_model, input_dim, latent_dim)
        # projection MLP
        self.l1 = nn.Linear(latent_dim, latent_dim)
        self.l2 = nn.Linear(latent_dim, out_dim)

    def forward(self, x):
        # h: encoder representation; x: projection-head output
        h = self.features(x)
        h = nn.Flatten()(h)
        x = self.l1(h)
        x = F.relu(x)
        x = self.l2(x)
        return h, x
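A note on input_dim for the 'conv' branch: each kernel-size-2 convolution with padding=1 grows the spatial dimensions by one, and each unpadded one shrinks them by one, so the four convolutions leave H x W unchanged and the final Conv2d emits 3 channels. After nn.Flatten(), input_dim must therefore equal 3 * H * W. A minimal shape check, assuming hypothetical 16 x 16 RGB inputs (the sizes are illustrative, not taken from the source):

    import torch

    # Hypothetical 16x16 RGB inputs; the conv stack preserves H x W,
    # so input_dim = 3 * 16 * 16 = 768.
    enc = Encoder(base_model='conv', input_dim=3 * 16 * 16, latent_dim=64)
    x = torch.randn(4, 3, 16, 16)
    print(enc(x).shape)  # torch.Size([4, 64])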
Classes
class Encoder (base_model, input_dim, latent_dim)

Base class for all neural network modules.

Your models should also subclass this class.

Modules can also contain other Modules, allowing you to nest them in a tree structure. You can assign the submodules as regular attributes:

    import torch.nn as nn
    import torch.nn.functional as F

    class Model(nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.conv1 = nn.Conv2d(1, 20, 5)
            self.conv2 = nn.Conv2d(20, 20, 5)

        def forward(self, x):
            x = F.relu(self.conv1(x))
            return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their parameters converted too when you call to(), etc.

training (bool): whether this module is in training or evaluation mode.

Initializes internal Module state, shared by both nn.Module and ScriptModule.
Ancestors
- torch.nn.modules.module.Module
Class variables
var dump_patches : bool
var training : bool
Methods
def forward(self, inputs) ‑> torch.Tensor

Defines the computation performed at every call.

Should be overridden by all subclasses.

Note
Although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
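Any base_model value other than 'conv' selects the fully connected branch, which flattens the input first, so input_dim is the product of the per-sample dimensions. A minimal sketch (the 'mlp' name and the 28 x 28 shape are illustrative assumptions, not taken from the source):

    import torch

    # Any value other than 'conv' picks the MLP branch; 'mlp' is illustrative.
    encoder = Encoder(base_model='mlp', input_dim=28 * 28, latent_dim=64)
    x = torch.randn(8, 1, 28, 28)   # flattened to 784 features per sample
    codes = encoder(x)
    print(codes.shape)              # torch.Size([8, 64])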
class Partial_Order_Models (base_model, input_dim, out_dim, latent_dim, **kwargs)

Base class for all neural network modules.

Your models should also subclass this class.

Modules can also contain other Modules, allowing you to nest them in a tree structure. You can assign the submodules as regular attributes:

    import torch.nn as nn
    import torch.nn.functional as F

    class Model(nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.conv1 = nn.Conv2d(1, 20, 5)
            self.conv2 = nn.Conv2d(20, 20, 5)

        def forward(self, x):
            x = F.relu(self.conv1(x))
            return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their parameters converted too when you call to(), etc.

training (bool): whether this module is in training or evaluation mode.

Initializes internal Module state, shared by both nn.Module and ScriptModule.
Ancestors
- torch.nn.modules.module.Module
Class variables
var dump_patches : bool
var training : bool
Methods
def forward(self, x) ‑> Tuple[torch.Tensor, torch.Tensor]

Defines the computation performed at every call.

Should be overridden by all subclasses.

Note
Although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
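forward returns a pair: h, the flattened encoder representation (latent_dim features), and x, the output of the projection MLP (out_dim features). Presumably h is kept as the extracted feature and x is fed to the training loss, in the style of projection-head models. A minimal usage sketch under the same hypothetical shape assumptions as above:

    import torch

    # Hypothetical sizes: 16x16 RGB inputs, 64-d latent codes, 32-d projections.
    model = Partial_Order_Models('conv', input_dim=3 * 16 * 16,
                                 out_dim=32, latent_dim=64)
    images = torch.randn(8, 3, 16, 16)
    h, z = model(images)            # h: encoder features, z: projection output
    print(h.shape, z.shape)         # torch.Size([8, 64]) torch.Size([8, 32])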