
saving the qk matrix in the attention module for convenience

Jong Wook Kim, 2 years ago
commit 68e44bd83c
1 changed file with 3 additions and 0 deletions

whisper/model.py (+3, -0)

@@ -62,6 +62,7 @@ class MultiHeadAttention(nn.Module):
         self.key = Linear(n_state, n_state, bias=False)
         self.value = Linear(n_state, n_state)
         self.out = Linear(n_state, n_state)
+        self.last_qk = None
 
     def forward(
         self,
@@ -96,6 +97,8 @@ class MultiHeadAttention(nn.Module):
         if mask is not None:
             qk = qk + mask[:n_ctx, :n_ctx]
 
+        self.last_qk = qk.detach()
+
         w = F.softmax(qk.float(), dim=-1).to(q.dtype)
         return (w @ v).permute(0, 2, 1, 3).flatten(start_dim=2)
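
With this change, each attention layer keeps its most recent pre-softmax attention scores (the scaled q·kᵀ product, plus any mask) in last_qk, where they can be read back after a forward pass; calling .detach() stores the values without keeping the autograd graph alive. Below is a minimal self-contained sketch of the pattern, assuming nothing beyond PyTorch; the TinyAttention module is an illustrative stand-in for whisper's MultiHeadAttention, not code from the repo:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class TinyAttention(nn.Module):
        """Illustrative stand-in for whisper's MultiHeadAttention."""

        def __init__(self, n_state: int, n_head: int):
            super().__init__()
            self.n_head = n_head
            self.query = nn.Linear(n_state, n_state)
            self.key = nn.Linear(n_state, n_state, bias=False)
            self.value = nn.Linear(n_state, n_state)
            self.out = nn.Linear(n_state, n_state)
            self.last_qk = None  # most recent pre-softmax attention scores

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            n_batch, n_ctx, n_state = x.shape
            scale = (n_state // self.n_head) ** -0.25
            # split into heads: (batch, head, ctx, head_dim)
            q = self.query(x).view(n_batch, n_ctx, self.n_head, -1).permute(0, 2, 1, 3) * scale
            k = self.key(x).view(n_batch, n_ctx, self.n_head, -1).permute(0, 2, 3, 1) * scale
            v = self.value(x).view(n_batch, n_ctx, self.n_head, -1).permute(0, 2, 1, 3)

            qk = q @ k  # attention scores, shape (batch, head, ctx, ctx)

            # stash the scores; detach() so the cache holds no autograd graph
            self.last_qk = qk.detach()

            w = F.softmax(qk.float(), dim=-1).to(q.dtype)
            return self.out((w @ v).permute(0, 2, 1, 3).flatten(start_dim=2))

    attn = TinyAttention(n_state=64, n_head=4)
    _ = attn(torch.randn(1, 10, 64))
    print(attn.last_qk.shape)  # torch.Size([1, 4, 10, 10])

With the real model, the same tensor would be read from the corresponding layer attribute after a forward pass, e.g. model.encoder.blocks[0].attn.last_qk (attribute path assumed from the package layout at this commit).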