From 0c5150a69c232ca5f612e899d97baf93bef991f5 Mon Sep 17 00:00:00 2001 From: Shashank Rajput Date: Tue, 26 Nov 2024 21:42:43 -0800 Subject: [PATCH] Remove compiled_flex_attention and compiled_create_block_mask attributes from attention __init__ --- llmfoundry/models/layers/attention.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/llmfoundry/models/layers/attention.py b/llmfoundry/models/layers/attention.py index 55fc9be2d9..97d98f52f4 100644 --- a/llmfoundry/models/layers/attention.py +++ b/llmfoundry/models/layers/attention.py @@ -863,12 +863,6 @@ def __init__( 'flex_attn_config must be provided for flex attention.', ) self.flex_attn_config = flex_attn_config - self.compiled_flex_attention = self.flex_attn_config.pop( - 'compiled_flex_attention', - ) - self.compiled_create_block_mask = self.flex_attn_config.pop( - 'compiled_create_block_mask', - ) def forward( self,