From 96ff0008a4775e9f08d2985a92cd374ca4e001e1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=CE=9D=CE=B1=CF=81=CE=BF=CF=85=CF=83=CE=AD=C2=B7=CE=BC?=
 =?UTF-8?q?=C2=B7=CE=B3=CE=B9=CE=BF=CF=85=CE=BC=CE=B5=CE=BC=CE=AF=C2=B7?=
 =?UTF-8?q?=CE=A7=CE=B9=CE=BD=CE=B1=CE=BA=CE=AC=CE=BD=CE=BD=CE=B1?=
 <40709280+NaruseMioShirakana@users.noreply.github.com>
Date: Sat, 6 Apr 2024 15:47:53 +0800
Subject: [PATCH] Delete GPT_SoVITS/module/embedding_onnx.py

---
 GPT_SoVITS/module/embedding_onnx.py | 64 -----------------------------
 1 file changed, 64 deletions(-)
 delete mode 100644 GPT_SoVITS/module/embedding_onnx.py

diff --git a/GPT_SoVITS/module/embedding_onnx.py b/GPT_SoVITS/module/embedding_onnx.py
deleted file mode 100644
index 2f8dcf1b..00000000
--- a/GPT_SoVITS/module/embedding_onnx.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/embedding.py
-import math
-
-import torch
-from torch import nn
-
-
-class TokenEmbedding(nn.Module):
-    def __init__(
-        self,
-        embedding_dim: int,
-        vocab_size: int,
-        dropout: float = 0.0,
-    ):
-        super().__init__()
-
-        self.vocab_size = vocab_size
-        self.embedding_dim = embedding_dim
-
-        self.dropout = torch.nn.Dropout(p=dropout)
-        self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim)
-
-    @property
-    def weight(self) -> torch.Tensor:
-        return self.word_embeddings.weight
-
-    def embedding(self, index: int) -> torch.Tensor:
-        return self.word_embeddings.weight[index : index + 1]
-
-    def forward(self, x: torch.Tensor):
-        x = self.word_embeddings(x)
-        x = self.dropout(x)
-        return x
-
-
-class SinePositionalEmbedding(nn.Module):
-    def __init__(
-        self,
-        embedding_dim: int,
-        dropout: float = 0.0,
-        scale: bool = False,
-        alpha: bool = False,
-    ):
-        super().__init__()
-        self.embedding_dim = embedding_dim
-        self.x_scale = math.sqrt(embedding_dim) if scale else 1.0
-        self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha)
-        self.dropout = torch.nn.Dropout(p=dropout)
-        self.reverse = False
-        self.div_term = torch.exp(torch.arange(0, self.embedding_dim, 2) * -(math.log(10000.0) / self.embedding_dim))
-        self.pe = self.extend_pe(2000)
-
-    def extend_pe(self, x):
-        position = torch.cumsum(torch.ones((x,1)), dim=0)
-        scpe = (position * self.div_term).unsqueeze(0)
-        pe = torch.cat([torch.sin(scpe), torch.cos(scpe)]).permute(1, 2, 0)
-        pe = pe.contiguous().view(1, -1, self.embedding_dim)
-        return pe
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        pe = self.pe[:,:x.size(1),:]
-        output = x.unsqueeze(-1) if x.ndim == 2 else x
-        output = output * self.x_scale + self.alpha * pe
-        return self.dropout(output)
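
For context, the deleted module supplied the token embedding and sinusoidal positional embedding used for ONNX export. The sketch below is illustrative only: it assumes a checkout that predates this patch (so GPT_SoVITS/module/embedding_onnx.py is still importable), and the sizes (vocab 1025, dim 512, batch 2, sequence 50) are arbitrary assumptions chosen to show the expected tensor shapes, not values taken from the patch.

# Minimal sketch, assuming a pre-patch checkout where
# GPT_SoVITS/module/embedding_onnx.py still exists; all sizes below are
# arbitrary illustrations.
import torch

from GPT_SoVITS.module.embedding_onnx import SinePositionalEmbedding, TokenEmbedding

token_emb = TokenEmbedding(embedding_dim=512, vocab_size=1025)
pos_emb = SinePositionalEmbedding(embedding_dim=512, scale=True, alpha=True)

tokens = torch.randint(0, 1025, (2, 50))  # (batch, seq) of token ids
x = token_emb(tokens)                     # -> (2, 50, 512) after lookup + dropout
y = pos_emb(x)                            # adds pe[:, :50, :]; still (2, 50, 512)
print(x.shape, y.shape)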