Module minder_utils.models.feature_extractors
Source code
from .simclr.simclr import *
from .partial_order.partial_order import *
from .autoencoder import *
__all__ = ['SimCLR', 'Partial_Order', 'AutoEncoder']
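The names in __all__ are re-exported at the package level by the star imports above, so the extractors can be imported directly from the package, for example:

from minder_utils.models.feature_extractors import SimCLR, Partial_Order, AutoEncoder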
Sub-modules
minder_utils.models.feature_extractors.autoencoder
minder_utils.models.feature_extractors.keras_autoencoders
minder_utils.models.feature_extractors.partial_order
minder_utils.models.feature_extractors.simclr
Classes
class AutoEncoder
Autoencoder feature extractor: an Encoder/Decoder pair trained to reconstruct its input, with the encoder output used as the learned representation. The reconstruction loss is BCE or MSE, depending on the loss configuration.
Source code

class AutoEncoder(Feature_extractor):
    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.encoder = Encoder(**self.config['model'])
        self.decoder = Decoder(**self.config['model'])
        self.model = nn.Sequential(self.encoder, self.decoder)
        self.criterion = nn.BCELoss() if self.config['loss']['func'] == 'bce' else nn.MSELoss()

    def forward(self, inputs):
        codes = self.encoder(inputs)
        decoded = self.decoder(codes)
        return codes, decoded

    def step(self, data):
        data, label = data
        if self.config['model']['base_model'] in ['nn']:
            data = data.view(data.size(0), -1)
        return self.criterion(self.decoder(self.encoder(data)), data)
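step() above flattens the batch (for the fully-connected base model) and returns a reconstruction loss between the decoded output and the input. The following is a minimal, self-contained sketch of that same objective, not part of the library: the layer sizes and optimizerless single step are hypothetical stand-ins for the configured Encoder/Decoder.

import torch
import torch.nn as nn

# Hypothetical stand-ins for the configured Encoder/Decoder.
encoder = nn.Sequential(nn.Linear(784, 64), nn.ReLU())
decoder = nn.Sequential(nn.Linear(64, 784), nn.Sigmoid())
criterion = nn.MSELoss()

x = torch.rand(32, 784)                    # dummy batch, already flattened as in step()
loss = criterion(decoder(encoder(x)), x)   # reconstruct the input and score the result
loss.backward()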
Ancestors
- Feature_extractor
- abc.ABC
- torch.nn.modules.module.Module
Class variables
var dump_patches : bool
var training : bool
Methods
def step(self, data)
Source code

def step(self, data):
    data, label = data
    if self.config['model']['base_model'] in ['nn']:
        data = data.view(data.size(0), -1)
    return self.criterion(self.decoder(self.encoder(data)), data)
Inherited members
class Partial_Order
Partial-order feature extractor: encodes consecutive days on either side of an anchor day and trains them with a ranking criterion, so that representations are ordered by their distance from the anchor.
Source code

class Partial_Order(Feature_extractor):
    def __init__(self):
        super(Partial_Order, self).__init__()
        self.model = Partial_Order_Models(**self.config["model"])
        self.criterion = Ranking(**self.config["loss"])

    def _custom_loader(self, data):
        X, y = data
        return Partial_Order_Loader(X, y, **self.config['loader'])

    def step(self, data):
        pre_anchor, anchor, post_anchor = data
        loss = 0
        for idx_day in range(len(post_anchor) - 1):
            loss += self._step(post_anchor[idx_day], post_anchor[idx_day + 1], anchor)
            loss += self._step(pre_anchor[idx_day], pre_anchor[idx_day + 1], anchor)
        return loss

    def _step(self, xi, xj, anchor):
        ris, zis = self.model(xi)
        rjs, zjs = self.model(xj)
        ras, zas = self.model(anchor)
        return self.criterion(zis, zjs, zas)

    @staticmethod
    def which_data(data):
        return data[0]
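step() walks outward over consecutive days on each side of the anchor and accumulates a ranking loss on their projections. The sketch below is illustrative only: it replaces the library's Ranking criterion with a simple margin-based stand-in, uses random tensors in place of model outputs, and assumes (without confirming) that the loader orders each list so that index 0 is closest to the anchor.

import torch
import torch.nn.functional as F

def ranking_term(zi, zj, za, margin=0.5):
    # Stand-in for Ranking: the nearer day (zi) should be more similar to the
    # anchor (za) than the farther day (zj), by at least `margin`.
    sim_i = F.cosine_similarity(zi, za, dim=1)
    sim_j = F.cosine_similarity(zj, za, dim=1)
    return F.relu(sim_j - sim_i + margin).mean()

n_days, batch, dim = 4, 8, 16
anchor = torch.randn(batch, dim)
pre_anchor = [torch.randn(batch, dim) for _ in range(n_days)]
post_anchor = [torch.randn(batch, dim) for _ in range(n_days)]

loss = 0
for idx_day in range(n_days - 1):
    loss += ranking_term(post_anchor[idx_day], post_anchor[idx_day + 1], anchor)
    loss += ranking_term(pre_anchor[idx_day], pre_anchor[idx_day + 1], anchor)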
Ancestors
- Feature_extractor
- abc.ABC
- torch.nn.modules.module.Module
Class variables
var dump_patches : bool
var training : bool
Static methods
def which_data(data)
Source code

@staticmethod
def which_data(data):
    return data[0]
Methods
def step(self, data)
Source code

def step(self, data):
    pre_anchor, anchor, post_anchor = data
    loss = 0
    for idx_day in range(len(post_anchor) - 1):
        loss += self._step(post_anchor[idx_day], post_anchor[idx_day + 1], anchor)
        loss += self._step(pre_anchor[idx_day], pre_anchor[idx_day + 1], anchor)
    return loss
Inherited members
class SimCLR
SimCLR feature extractor: a ResNet-based encoder with a projection head trained contrastively on two augmented views of each sample, using the normalized-temperature cross-entropy (NT-Xent) loss.
Source code

class SimCLR(Feature_extractor):
    def __init__(self):
        super(SimCLR, self).__init__()
        self.nt_xent_criterion = NTXentLoss(self.device,
                                            self.config['loss']['temperature'],
                                            self.config['loss']['use_cosine_similarity'])
        self.model = ResNetSimCLR(**self.config["model"]).to(self.device)

    def step(self, data):
        (xis, xjs), _ = data

        ris, zis = self.model(xis)  # [N,C]
        # get the representations and the projections
        rjs, zjs = self.model(xjs)  # [N,C]

        # normalize projection feature vectors
        zis = F.normalize(zis, dim=1)
        zjs = F.normalize(zjs, dim=1)

        loss = self.nt_xent_criterion(zis, zjs)
        return loss

    def _step(self, model, xis, xjs, n_iter):
        # get the representations and the projections
        ris, zis = model(xis)  # [N,C]
        rjs, zjs = model(xjs)  # [N,C]

        # normalize projection feature vectors
        zis = F.normalize(zis, dim=1)
        zjs = F.normalize(zjs, dim=1)

        loss = self.nt_xent_criterion(zis, zjs)
        return loss

    @staticmethod
    def which_data(data):
        return data[0]
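step() encodes the two augmented views, L2-normalizes their projections, and scores them with NTXentLoss. The sketch below is not the library's NTXentLoss; it is a minimal in-batch NT-Xent written directly with cross-entropy, using hypothetical batch and projection sizes.

import torch
import torch.nn.functional as F

def nt_xent(zis, zjs, temperature=0.5):
    z = torch.cat([zis, zjs], dim=0)        # [2N, C], already normalized
    sim = z @ z.t() / temperature           # pairwise cosine similarities
    sim.fill_diagonal_(float('-inf'))       # mask self-similarity
    n = zis.size(0)
    # positives: view 1 of sample k matches view 2 of sample k, and vice versa
    targets = torch.cat([torch.arange(n, 2 * n), torch.arange(0, n)])
    return F.cross_entropy(sim, targets)

zis = F.normalize(torch.randn(8, 128), dim=1)   # projections of view 1
zjs = F.normalize(torch.randn(8, 128), dim=1)   # projections of view 2
loss = nt_xent(zis, zjs)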
Ancestors
- Feature_extractor
- abc.ABC
- torch.nn.modules.module.Module
Class variables
var dump_patches : bool
var training : bool
Static methods
def which_data(data)
Source code

@staticmethod
def which_data(data):
    return data[0]
Methods
def step(self, data)
Source code

def step(self, data):
    (xis, xjs), _ = data

    ris, zis = self.model(xis)  # [N,C]
    # get the representations and the projections
    rjs, zjs = self.model(xjs)  # [N,C]

    # normalize projection feature vectors
    zis = F.normalize(zis, dim=1)
    zjs = F.normalize(zjs, dim=1)

    loss = self.nt_xent_criterion(zis, zjs)
    return loss
Inherited members