Skip to content

Commit

Permalink
Use 💾 emoji to save to the incident timeline (#383)
Browse files Browse the repository at this point in the history
* Adding files for the emoji button that saves messages to the incident timeline

* Adding unit tests

* Adding all the unit tests

* Tweaking unit tests - mostly changing the names so that they are more descriptive

* Fixing linting

* Changing code so that only floppy disk reactions are detected

* Making changes based on Pat's recommendations

* Fixing linting

* Fixing formatting

* Update app/commands/incident.py

Co-authored-by: Pat Heard <[email protected]>

---------

Co-authored-by: Pat Heard <[email protected]>
  • Loading branch information
sylviamclaughlin and patheard authored Jan 29, 2024
1 parent 60f1497 commit bdcb530
Show file tree
Hide file tree
Showing 10 changed files with 1,093 additions and 15 deletions.
15 changes: 1 addition & 14 deletions app/commands/helpers/incident_helper.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
import json
import re
import logging
from integrations import google_drive
from commands.utils import get_stale_channels, log_to_sentinel
from commands.utils import get_stale_channels, log_to_sentinel, extract_google_doc_id

help_text = """
\n `/sre incident create-folder <folder_name>`
Expand Down Expand Up @@ -493,18 +492,6 @@ def metadata_items(folder):
]


def extract_google_doc_id(url):
    # Return the id captured between "/d/" and the following "/" in a
    # Google Docs URL, or None when the URL contains no such segment.
    doc_id_match = re.search(r"/d/([a-zA-Z0-9_-]+)/", url)
    return doc_id_match.group(1) if doc_id_match else None


def return_channel_name(input_str):
# return the channel name without the incident- prefix and appending a # to the channel name
prefix = "incident-"
Expand Down
170 changes: 169 additions & 1 deletion app/commands/incident.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,17 @@

from integrations import google_drive, opsgenie
from models import webhooks
from commands.utils import log_to_sentinel, get_user_locale
from commands.utils import (
log_to_sentinel,
get_user_locale,
rearrange_by_datetime_ascending,
convert_epoch_to_datetime_est,
extract_google_doc_id,
)
from integrations.google_drive import (
get_timeline_section,
replace_text_between_headings,
)

from dotenv import load_dotenv

Expand All @@ -18,6 +28,8 @@

INCIDENT_CHANNEL = os.environ.get("INCIDENT_CHANNEL")
SLACK_SECURITY_USER_GROUP_ID = os.environ.get("SLACK_SECURITY_USER_GROUP_ID")
START_HEADING = "Detailed Timeline"
END_HEADING = "Trigger"


def handle_incident_action_buttons(client, ack, body, logger):
Expand Down Expand Up @@ -361,3 +373,159 @@ def generate_success_modal(body):
},
],
}


def handle_reaction_added(client, ack, body, logger):
    """Append the reacted-to message(s) to the incident timeline document.

    Triggered on a Slack reaction_added event. Only acts in channels whose
    name starts with "incident-": it finds the incident report document via
    the channel's "Incident report" bookmark, appends each new message to the
    timeline section, re-sorts the section chronologically, and writes it back.

    Args:
        client: Slack WebClient (conversations, bookmarks, users APIs).
        ack: Slack ack callback; invoked immediately to acknowledge the event.
        body: Slack event payload for reaction_added.
        logger: Logger used for error reporting.
    """
    ack()
    # Channel the reaction was used in; resolving the name needs conversations_info.
    channel_id = body["event"]["item"]["channel"]
    channel_name = client.conversations_info(channel=channel_id)["channel"]["name"]

    # Only incident channels maintain a timeline document.
    if not channel_name.startswith("incident-"):
        return
    try:
        # Messages the reaction was applied to (thread-aware lookup).
        messages = return_messages(client, body, channel_id)

        # Locate the incident report document id via the channel bookmarks.
        document_id = ""
        response = client.bookmarks_list(channel_id=channel_id)
        if response["ok"]:
            for bookmark in response["bookmarks"]:
                if bookmark["title"] == "Incident report":
                    document_id = extract_google_doc_id(bookmark["link"])
                    break
        if document_id == "":
            # Bail out early: previously the code logged this error but still
            # went on to call the Google APIs with an empty document id.
            logger.error("No incident document found for this channel.")
            return

        for message in messages:
            # Epoch timestamp -> "YYYY-MM-DD HH:MM:SS ET".
            message_date_time = convert_epoch_to_datetime_est(message["ts"])
            # Full name of the author, included in the timeline entry.
            user = client.users_profile_get(user=message["user"])
            user_full_name = user["profile"]["real_name"]

            # Current timeline section content.
            content = get_timeline_section(document_id)

            # Skip messages whose timestamp already appears in the timeline.
            if content and message_date_time not in content:
                content += (
                    f"{message_date_time} {user_full_name}: {message['text']}"
                )

                # Include the first attached image, if any.
                if "files" in message:
                    image = message["files"][0]["url_private"]
                    content += f"\nImage: {image}"

                # Keep the timeline in ascending chronological order.
                sorted_content = rearrange_by_datetime_ascending(content)

                # Write the sorted content back between the section headings.
                replace_text_between_headings(
                    document_id, sorted_content, START_HEADING, END_HEADING
                )
    except Exception as e:
        logger.error(e)


# Execute this function when a reaction was removed
def handle_reaction_removed(client, ack, body, logger):
    """Remove a message's entry from the incident timeline document.

    Triggered on a Slack reaction_removed event. Only acts in channels whose
    name starts with "incident-": it reconstructs the timeline entry for the
    message the reaction was removed from, deletes it from the timeline
    section, and writes the section back.

    Args:
        client: Slack WebClient (conversations, bookmarks, users APIs).
        ack: Slack ack callback; invoked immediately to acknowledge the event.
        body: Slack event payload for reaction_removed.
        logger: Logger used for warnings and error reporting.
    """
    ack()
    channel_id = body["event"]["item"]["channel"]

    # Resolve the channel name via conversations_info.
    result = client.conversations_info(channel=channel_id)
    channel_name = result["channel"]["name"]

    # Only incident channels maintain a timeline document.
    if not channel_name.startswith("incident-"):
        return
    try:
        messages = return_messages(client, body, channel_id)

        if not messages:
            logger.warning("No messages found")
            return
        # The message whose timeline entry should be deleted.
        message = messages[0]

        # Epoch timestamp -> "YYYY-MM-DD HH:MM:SS ET".
        message_date_time = convert_epoch_to_datetime_est(message["ts"])

        # Full name of the author, used when the entry was written.
        user = client.users_profile_get(user=message["user"])
        user_full_name = user["profile"]["real_name"]

        # Locate the incident report document id via the channel bookmarks.
        document_id = ""
        response = client.bookmarks_list(channel_id=channel_id)
        if response["ok"]:
            for bookmark in response["bookmarks"]:
                if bookmark["title"] == "Incident report":
                    document_id = extract_google_doc_id(bookmark["link"])
                    break
        if document_id == "":
            # Bail out early: previously the code logged this error but still
            # went on to call the Google APIs with an empty document id.
            logger.error("No incident document found for this channel.")
            return

        # Current timeline section content.
        content = get_timeline_section(document_id)

        # Reconstruct the entry exactly as it was added to the timeline.
        message_to_remove = (
            f"\n{message_date_time} {user_full_name}: {message['text']}\n"
        )
        # A file attachment adds an image line to the entry.
        if "files" in message:
            image = message["files"][0]["url_private"]
            message_to_remove += f"\nImage: {image}"

        # Delete the entry and write the section back; warn if it is absent.
        if message_to_remove in content:
            content = content.replace(message_to_remove, "")

            replace_text_between_headings(
                document_id,
                content,
                START_HEADING,
                END_HEADING,
            )
        else:
            logger.warning("Message not found in the timeline")
            return
    except Exception as e:
        logger.error(e)


# Function to return the messages from the conversation
def return_messages(client, body, channel_id):
    """Fetch the message(s) a reaction event refers to.

    Args:
        client: Slack WebClient (conversations_history / conversations_replies).
        body: Slack event payload; the target message ts is read from
            body["event"]["item"]["ts"].
        channel_id (str): Channel to search in.

    Returns:
        list: The matching message dicts from the Slack API response.
    """
    # Fetch the message that had the reaction added/removed.
    result = client.conversations_history(
        channel=channel_id,
        limit=1,
        inclusive=True,
        oldest=body["event"]["item"]["ts"],
    )
    messages = result["messages"]
    # An empty history result means the message lives inside a thread, so
    # fetch it from the thread replies instead.
    if not messages:
        result = client.conversations_replies(
            channel=channel_id,
            ts=body["event"]["item"]["ts"],
            inclusive=True,
            include_all_metadata=True,
        )
        messages = result["messages"]
    return messages
99 changes: 99 additions & 0 deletions app/commands/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
import time
from datetime import datetime, timedelta
from integrations.sentinel import send_event
import re
import pytz

logging.basicConfig(level=logging.INFO)

Expand Down Expand Up @@ -124,3 +126,100 @@ def get_user_locale(user_id, client):
if user_locale["ok"] and (user_locale["user"]["locale"] in supported_locales):
return user_locale["user"]["locale"]
return default_locale


def rearrange_by_datetime_ascending(text):
    """Sort timeline entries in *text* into ascending chronological order.

    An entry starts at a line beginning with a "YYYY-MM-DD HH:MM:SS ET"
    timestamp and includes any following continuation lines. Entries without
    a parsable timestamp (e.g. leading text before the first stamp) are
    dropped, matching the original behaviour.

    Args:
        text (str): Newline-separated timeline content.

    Returns:
        str: The entries re-joined with newlines, oldest first.
    """
    # Compile once instead of re-matching the pattern string per line.
    timestamp_pattern = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} ET")
    entry_pattern = re.compile(
        r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} ET):?[\s,]*(.*)", re.DOTALL
    )

    # Group the lines into (possibly multiline) entries.
    entries = []
    current_entry = []
    for line in text.split("\n"):
        if timestamp_pattern.match(line):
            if current_entry:
                # A new timestamp starts a new entry; flush the previous one.
                entries.append("\n".join(current_entry))
                current_entry = [line]
            else:
                current_entry.append(line)
        else:
            # Not a timestamp: continuation of the previous message.
            current_entry.append(line)
    # Flush the last entry (the original code had two identical branches here).
    if current_entry:
        entries.append("\n".join(current_entry))

    # Extract (datetime, message) pairs from each entry.
    dated_entries = []
    for entry in entries:
        match = entry_pattern.match(entry)
        if match:
            date_str, msg = match.groups()
            # Parse the stamp, dropping the trailing " ET" suffix.
            dt = datetime.strptime(date_str[:-3].strip(), "%Y-%m-%d %H:%M:%S")
            dated_entries.append((dt, msg))

    # Ascending chronological order.
    sorted_entries = sorted(dated_entries, key=lambda pair: pair[0])

    # Reformat back into strings, re-attaching the 'ET' marker.
    return "\n".join(
        f"{dt.strftime('%Y-%m-%d %H:%M:%S')} ET {msg}" for dt, msg in sorted_entries
    )


def convert_epoch_to_datetime_est(epoch_time):
    """
    Convert an epoch time to a standard date/time format in Eastern Time (ET).

    Args:
        epoch_time (float): The epoch time (seconds since the Unix epoch;
            str input is accepted and converted via float()).

    Returns:
        str: The corresponding date and time in the format YYYY-MM-DD HH:MM:SS ET.
    """
    # Local import keeps this change self-contained; zoneinfo is stdlib
    # (3.9+) and replaces the deprecated datetime.utcfromtimestamp() plus
    # third-party pytz combination used previously.
    from zoneinfo import ZoneInfo

    # fromtimestamp() with an explicit tz converts straight from the epoch
    # to Eastern Time, handling DST the same way pytz's astimezone() did.
    est_datetime = datetime.fromtimestamp(float(epoch_time), tz=ZoneInfo("US/Eastern"))

    # Format with 'ET' appended, matching the timeline entry format.
    return est_datetime.strftime("%Y-%m-%d %H:%M:%S") + " ET"


def extract_google_doc_id(url):
    """Return the document id embedded in a Google Docs URL, or None."""
    # Guard against missing input before attempting any matching.
    if not url:
        logging.error("URL is empty or None")
        return None

    # The id is the path segment between ".../document/d/" and the next "/".
    doc_id_match = re.search(
        r"https://docs.google.com/document/d/([a-zA-Z0-9_-]+)/", url
    )
    return doc_id_match.group(1) if doc_id_match else None
Loading

0 comments on commit bdcb530

Please sign in to comment.