放弃了在t2s模型中使用@torch.jit.script,确保pytorch环境之间的兼容性

This commit is contained in:
ChasonJiang 2024-08-16 15:59:05 +08:00
parent f5a5f1890f
commit a3a9a53b9b

View File

@@ -39,7 +39,7 @@ default_config = {
"EOS": 1024,
}
@torch.jit.script
# @torch.jit.script
# Efficient implementation equivalent to the following:
def scaled_dot_product_attention(query:torch.Tensor, key:torch.Tensor, value:torch.Tensor, attn_mask:Optional[torch.Tensor]=None, scale:Optional[torch.Tensor]=None) -> torch.Tensor:
B, H, L, S =query.size(0), query.size(1), query.size(-2), key.size(-2)
@@ -82,7 +82,7 @@ class T2SMLP:
return x
@torch.jit.script
# @torch.jit.script
class T2SBlock:
def __init__(
self,
@@ -218,7 +218,7 @@ class T2SBlock:
return x, k_cache, v_cache
@torch.jit.script
# @torch.jit.script
class T2STransformer:
def __init__(self, num_blocks : int, blocks: List[T2SBlock]):
self.num_blocks : int = num_blocks