Update hieradet.py

The following lines are not used:
head_dim = dim_out // num_heads
self.scale = head_dim**-0.5

F.scaled_dot_product_attention applies this 1/sqrt(head_dim) scaling automatically, so computing and storing the scale factor here is unnecessary.
This commit is contained in:
Arun
2024-08-07 11:35:46 +05:30
committed by GitHub
parent 511199d7a9
commit 6ec8560436

View File

@@ -46,10 +46,7 @@ class MultiScaleAttention(nn.Module):
self.dim = dim
self.dim_out = dim_out
self.num_heads = num_heads
head_dim = dim_out // num_heads
self.scale = head_dim**-0.5
self.q_pool = q_pool
self.qkv = nn.Linear(dim, dim_out * 3)