# Networks.py

import torch
import torch.nn as nn
import torch.nn.functional as F
import Features  # project-local feature-extraction helpers
    
################################################################################
# Feed-forward scorer: embeds a fixed window of parser-configuration features
# plus a short transition history, then applies two fully connected layers.
class BaseNet(nn.Module):
  def __init__(self, dicts, outputSize, incremental) :
    super().__init__()
    # Zero-size parameter used only to expose the device the model lives on.
    self.dummyParam = nn.Parameter(torch.empty(0), requires_grad=False)
    
    self.incremental = incremental
    # Feature template: parser-configuration positions to embed (b.N = Nth
    # word of the buffer, s.N = Nth word of the stack; a third index such as
    # s.0.1 presumably addresses a dependent of that word).
    self.featureFunction = "b.-2 b.-1 b.0 b.1 b.2 s.0 s.1 s.2 s.0.0 s.0.-1 s.0.1 s.1.0 s.1.-1 s.1.1 s.2.0 s.2.-1 s.2.1"
    self.historyNb = 5              # number of past transitions used as features
    self.columns = ["UPOS", "FORM"] # CoNLL-U columns embedded for each position
    
    self.embSize = 64
    self.nbTargets = len(self.featureFunction.split())
    # One embedding index per (column, target) pair, plus the history.
    self.inputSize = len(self.columns)*self.nbTargets+self.historyNb
    self.outputSize = outputSize
    # One embedding table per dictionary (UPOS, FORM, HISTORY, ...).
    for name in dicts.dicts :
      self.add_module("emb_"+name, nn.Embedding(len(dicts.dicts[name]), self.embSize))
    self.fc1 = nn.Linear(self.inputSize * self.embSize, 1600)
    self.fc2 = nn.Linear(1600, outputSize)
    self.dropout = nn.Dropout(0.3)
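    # Width check, under the template above: inputSize = 2*17 + 5 = 39 indices,
    # each embedded into 64 dims and concatenated, so fc1 sees 39*64 = 2496.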
    
    self.apply(self.initWeights)
    
  def forward(self, x) :
    # x holds embedding indices, one per feature, laid out column by column:
    # [UPOS targets | FORM targets | transition history]
    embeddings = []
    for i in range(len(self.columns)) :
      embeddings.append(getattr(self, "emb_"+self.columns[i])(x[...,i*self.nbTargets:(i+1)*self.nbTargets]))
    y = torch.cat(embeddings,-1).view(x.size(0),-1)
    if self.historyNb > 0 :
      histStart = len(self.columns)*self.nbTargets
      historyEmb = getattr(self, "emb_HISTORY")(x[...,histStart:histStart+self.historyNb]).view(x.size(0),-1)
      y = torch.cat([y, historyEmb],-1)
    y = self.dropout(y)
    y = F.relu(self.dropout(self.fc1(y)))
    y = self.fc2(y)
    return y
    
  def currentDevice(self) :
    return self.dummyParam.device
    
  def initWeights(self,m) :
    # Xavier-initialise linear layers, with a small positive bias.
    if isinstance(m, nn.Linear) :
      torch.nn.init.xavier_uniform_(m.weight)
      m.bias.data.fill_(0.01)
    
  def extractFeatures(self, dicts, config) :
    # Encode the current configuration as a flat tensor of embedding indices,
    # in the layout expected by forward(): columns first, then history.
    colsValues = Features.extractColsFeatures(dicts, config, self.featureFunction, self.columns, self.incremental)
    historyValues = Features.extractHistoryFeatures(dicts, config, self.historyNb)
    return torch.cat([colsValues, historyValues])
    
################################################################################

################################################################################
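# Variant of BaseNet in which the buffer window and the transition history are
# contextualised by bidirectional LSTMs before the feed-forward layers.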
class LSTMNet(nn.Module):
  def __init__(self, dicts, outputSize, incremental) :
    super().__init__()
    self.dummyParam = nn.Parameter(torch.empty(0), requires_grad=False)
    
    self.incremental = incremental
    # The buffer window goes through an LSTM; the stack-centred positions are
    # embedded directly, as in BaseNet.
    self.featureFunctionLSTM = "b.-2 b.-1 b.0 b.1 b.2"
    self.featureFunction = "s.0 s.1 s.2 s.0.0 s.0.-1 s.0.1 s.1.0 s.1.-1 s.1.1 s.2.0 s.2.-1 s.2.1"
    self.historyNb = 5              # number of past transitions used as features
    self.columns = ["UPOS", "FORM"] # CoNLL-U columns embedded for each position
    
    self.embSize = 64
    self.nbInputLSTM = len(self.featureFunctionLSTM.split())
    self.nbInputBase = len(self.featureFunction.split())
    self.nbTargets = self.nbInputBase + self.nbInputLSTM
    self.inputSize = len(self.columns)*self.nbTargets+self.historyNb
    self.outputSize = outputSize
    for name in dicts.dicts :
      self.add_module("emb_"+name, nn.Embedding(len(dicts.dicts[name]), self.embSize))
    # Bidirectional LSTMs: each direction gets half the width, so the output
    # per timestep has the same size as its input.
    self.lstmFeat = nn.LSTM(len(self.columns)*self.embSize, len(self.columns)*(self.embSize//2), 1, batch_first=True, bidirectional=True)
    self.lstmHist = nn.LSTM(self.embSize, self.embSize//2, 1, batch_first=True, bidirectional=True)
    self.fc1 = nn.Linear(self.inputSize * self.embSize, 1600)
    self.fc2 = nn.Linear(1600, outputSize)
    self.dropout = nn.Dropout(0.3)
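    # Width check: base 12*2*64 + LSTM window 5*2*64 + history 5*64
    # = 1536 + 640 + 320 = 2496 = inputSize * embSize, matching fc1.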
    
    self.apply(self.initWeights)
    
  def forward(self, x) :
    # x layout: [UPOS base | FORM base | UPOS window | FORM window | history]
    embeddings = []
    embeddingsLSTM = []
    for i in range(len(self.columns)) :
      embeddings.append(getattr(self, "emb_"+self.columns[i])(x[...,i*self.nbInputBase:(i+1)*self.nbInputBase]))
    lstmStart = len(self.columns)*self.nbInputBase
    for i in range(len(self.columns)) :
      embeddingsLSTM.append(getattr(self, "emb_"+self.columns[i])(x[...,lstmStart+i*self.nbInputLSTM:lstmStart+(i+1)*self.nbInputLSTM]))

    # Contextualise the buffer window with the LSTM, then flatten everything.
    z = torch.cat(embeddingsLSTM,-1)
    z = self.lstmFeat(z)[0]
    z = z.reshape(x.size(0), -1)
    y = torch.cat(embeddings,-1).reshape(x.size(0),-1)
    y = torch.cat([y,z], -1)
    if self.historyNb > 0 :
      histStart = len(self.columns)*self.nbTargets
      historyEmb = getattr(self, "emb_HISTORY")(x[...,histStart:histStart+self.historyNb])
      historyEmb = self.lstmHist(historyEmb)[0]
      historyEmb = historyEmb.reshape(x.size(0), -1)
      y = torch.cat([y, historyEmb],-1)
    y = self.dropout(y)
    y = F.relu(self.dropout(self.fc1(y)))
    y = self.fc2(y)
    return y
    
  def currentDevice(self) :
    return self.dummyParam.device
    
  def initWeights(self,m) :
    # Xavier-initialise linear layers, with a small positive bias.
    if isinstance(m, nn.Linear) :
      torch.nn.init.xavier_uniform_(m.weight)
      m.bias.data.fill_(0.01)
    
  def extractFeatures(self, dicts, config) :
    # Concatenate in the same order that forward() slices: base features,
    # LSTM-window features, then the transition history.
    colsValuesBase = Features.extractColsFeatures(dicts, config, self.featureFunction, self.columns, self.incremental)
    colsValuesLSTM = Features.extractColsFeatures(dicts, config, self.featureFunctionLSTM, self.columns, self.incremental)
    historyValues = Features.extractHistoryFeatures(dicts, config, self.historyNb)
    return torch.cat([colsValuesBase, colsValuesLSTM, historyValues])
################################################################################
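
################################################################################
# Minimal smoke test (an editor's sketch, not part of the original module).
# _StubDicts is a hypothetical stand-in for the project's real dictionary
# container, assumed to expose a .dicts mapping of name -> token-to-index
# dict with entries for every embedded column plus "HISTORY". Running it
# requires the sibling Features module to be importable (as the top-level
# import already does); only output shapes are checked.
if __name__ == "__main__" :
  class _StubDicts :
    def __init__(self) :
      self.dicts = {"UPOS" : {i : i for i in range(20)},
                    "FORM" : {i : i for i in range(100)},
                    "HISTORY" : {i : i for i in range(30)}}

  for Net in (BaseNet, LSTMNet) :
    net = Net(_StubDicts(), outputSize=10, incremental=False)
    x = torch.randint(0, 20, (4, net.inputSize))  # random but in-vocabulary indices
    assert net(x).shape == (4, 10), Net.__name__
################################################################################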