Skip to content

Commit

Permalink
Feat/refactor incident alert (#619)
Browse files Browse the repository at this point in the history
  • Loading branch information
gcharest authored Aug 9, 2024
1 parent 78f72e6 commit 6f19015
Show file tree
Hide file tree
Showing 14 changed files with 2,328 additions and 2,144 deletions.
314 changes: 0 additions & 314 deletions app/integrations/google_drive.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
import base64
import logging
import datetime
import re

from dotenv import load_dotenv
from googleapiclient.discovery import build
Expand Down Expand Up @@ -46,39 +45,6 @@ def get_google_service(service, version):
return build(service, version, credentials=creds)


def add_metadata(file_id, key, value):
    """Attach a single appProperties entry (key -> value) to a Drive file.

    Args:
        file_id: ID of the Drive file to update.
        key: appProperties key to set.
        value: value to store under that key.

    Returns:
        The API response containing the file's name and appProperties.
    """
    drive = get_google_service("drive", "v3")
    request = drive.files().update(
        fileId=file_id,
        body={"appProperties": {key: value}},
        fields="name, appProperties",
        supportsAllDrives=True,
    )
    return request.execute()


def create_folder(name):
    """Create a folder named *name* inside the SRE incident folder.

    Args:
        name: display name for the new Drive folder.

    Returns:
        A confirmation string containing the created folder's name.
    """
    drive = get_google_service("drive", "v3")
    folder_metadata = {
        "name": name,
        "mimeType": "application/vnd.google-apps.folder",
        "parents": [SRE_INCIDENT_FOLDER],
    }
    created = (
        drive.files()
        .create(body=folder_metadata, supportsAllDrives=True, fields="name")
        .execute()
    )
    return "Created folder {}".format(created["name"])


def create_new_folder(name, parent_folder):
# Creates a new folder in the parent_folder directory
service = get_google_service("drive", "v3")
Expand Down Expand Up @@ -141,21 +107,6 @@ def copy_file_to_folder(file_id, name, parent_folder_id, destination_folder_id):
return updated_file["id"]


def delete_metadata(file_id, key):
    """Remove the appProperties entry *key* from a Drive file.

    Setting an appProperties value to None deletes the key server-side.

    Args:
        file_id: ID of the Drive file to update.
        key: appProperties key to remove.

    Returns:
        The API response with the file's name and remaining appProperties.
    """
    drive = get_google_service("drive", "v3")
    removal_body = {"appProperties": {key: None}}
    return (
        drive.files()
        .update(
            fileId=file_id,
            body=removal_body,
            fields="name, appProperties",
            supportsAllDrives=True,
        )
        .execute()
    )


def create_new_docs_file(name, parent_folder_id):
# Creates a new google docs file in the parent_folder directory
service = get_google_service("drive", "v3")
Expand Down Expand Up @@ -194,24 +145,6 @@ def create_new_sheets_file(name, parent_folder_id):
return results["id"]


def get_document_by_channel_name(channel_name):
    """Find the Drive file whose name exactly matches *channel_name*.

    Searches the SRE shared drive (non-trashed files only) and returns at
    most one match.

    Args:
        channel_name: exact file name to search for (incident channel name).

    Returns:
        A list of at most one dict with ``appProperties``, ``id`` and
        ``name``; an empty list when nothing matches.
    """
    service = get_google_service("drive", "v3")
    # Escape backslashes and single quotes so a channel name containing a
    # quote cannot break out of (or inject into) the Drive query string.
    safe_name = channel_name.replace("\\", "\\\\").replace("'", "\\'")
    results = (
        service.files()
        .list(
            pageSize=1,
            supportsAllDrives=True,
            includeItemsFromAllDrives=True,
            corpora="drive",
            q="trashed=false and name='{}'".format(safe_name),
            driveId=SRE_DRIVE_ID,
            fields="files(appProperties, id, name)",
        )
        .execute()
    )
    return results.get("files", [])


def list_folders():
service = get_google_service("drive", "v3")
results = (
Expand Down Expand Up @@ -295,223 +228,6 @@ def merge_data(file_id, name, product, slack_channel, on_call_names):
return result


def get_timeline_section(document_id):
    """Extract the raw text between the timeline start and end headings.

    Fetches the Google Doc, walks its paragraph elements in order, and
    accumulates the text of every run seen after a run containing
    START_HEADING, stopping at the first run containing END_HEADING.
    Hyperlinked runs are rewritten as Markdown ``[text](url)`` links.

    Args:
        document_id: ID of the Google Docs incident document.

    Returns:
        The accumulated timeline text, or None when the start/end heading
        pair was not both found.
    """
    # Retrieve the document
    service = get_google_service("docs", "v1")
    document = service.documents().get(documentId=document_id).execute()
    content = document.get("body").get("content")

    timeline_content = ""
    record = False  # True once START_HEADING has been seen; never reset
    found_start = False
    found_end = False

    # Iterate through the elements of the document
    for element in content:
        if "paragraph" in element:
            paragraph_elements = element.get("paragraph").get("elements")
            for elem in paragraph_elements:
                text_run = elem.get("textRun")
                if text_run:
                    text = text_run.get("content")
                    textStyle = text_run.get("textStyle", {})
                    if "link" in textStyle:
                        # Extract link URL
                        link = textStyle["link"].get("url")
                        # Format the text with the link as Markdown
                        formatted_text = f"[{text.strip()}]({link})"
                        # Replace the text with the formatted text
                        text = formatted_text
                    if START_HEADING in text:
                        record = True
                        found_start = True
                    elif END_HEADING in text:
                        found_end = True
                        if found_start:
                            # End heading reached after the start heading:
                            # everything recorded so far is the timeline.
                            return timeline_content
                        # NOTE(review): if END_HEADING appears before
                        # START_HEADING, recording continues to the end of
                        # the document and the tail is returned below —
                        # confirm this ordering is intended.
                    elif record:
                        timeline_content += text

    # Return None if either START_HEADING or END_HEADING not found
    return None if not (found_start and found_end) else timeline_content


def find_heading_indices(content, start_heading, end_heading):
    """Find the document indices bracketing the text between two headings.

    Args:
        content: list of structural elements from a Google Docs body
            (``document["body"]["content"]``).
        start_heading: substring identifying the opening heading's text run.
        end_heading: substring identifying the closing heading's text run.

    Returns:
        ``(start_index, end_index)`` — the ``endIndex`` of the start
        heading's run and the ``startIndex`` of the end heading's run.
        Either value is None when the corresponding heading was not found.
        The end heading only counts if it appears after the start heading.
    """
    # BUG FIX: the original had a `return` at loop-body level, which exited
    # after scanning only the FIRST structural element — headings located in
    # any later element were never found.
    start_index, end_index = None, None
    for element in content:
        if "paragraph" not in element:
            continue
        for text_run in element["paragraph"].get("elements", []):
            text = text_run.get("textRun", {}).get("content", "")
            if start_heading in text:
                start_index = text_run.get("endIndex")
            elif end_heading in text and start_index is not None:
                # Both headings located — no need to scan further.
                return start_index, text_run.get("startIndex")
    # Fell through: one or both headings missing (end_index stays None).
    return start_index, end_index


# Replace the text between the headings
def replace_text_between_headings(doc_id, new_content, start_heading, end_heading):
    """Replace the document text between two headings with *new_content*.

    Deletes the range between the headings, then re-inserts *new_content*
    entry by entry.  Entries formatted as Markdown
    ``[<date> ET](<url>) <name>: <message>`` are inserted as plain text with
    the date portion styled as a hyperlink; anything else is inserted as-is.
    Finally the inserted range is forced back to NORMAL_TEXT paragraph style
    and the whole request list is sent in one batchUpdate call.

    Args:
        doc_id: ID of the Google Docs document to modify.
        new_content: formatted timeline text, entries separated by arrow
            emoji markers.
        start_heading: substring identifying the opening heading.
        end_heading: substring identifying the closing heading.

    Logs a warning and does nothing when the headings are not found.
    """
    # Setup the service
    service = get_google_service("docs", "v1")

    # Retrieve the document content
    document = service.documents().get(documentId=doc_id).execute()
    content = document.get("body").get("content")

    # Find the start and end indices
    start_index, end_index = find_heading_indices(content, start_heading, end_heading)

    if start_index is not None and end_index is not None:
        # Delete the existing content from the document
        requests = [
            {
                "deleteContentRange": {
                    "range": {"startIndex": start_index, "endIndex": end_index}
                }
            }
        ]

        # split the formatted content by the emoji (arrow WITHOUT the
        # variation selector — a different character than the one below)
        line = new_content.split(" ➡ ")
        # NOTE(review): this `pattern` is never used — it is shadowed by the
        # reassignment inside the entry loop below; dead code candidate.
        pattern = r"\[([^\]]+)\]\(([^)]+)\)\s([^:]+):\s(.+)"
        insert_index = start_index
        inserted_content = ""

        # Insert an empty line before the new content and after the placeholder text
        text_to_insert = "\n"
        text_len = len(text_to_insert)
        requests.append(
            {
                "insertText": {
                    "location": {"index": insert_index},
                    "text": text_to_insert,
                }
            }
        )
        # update the insert index
        insert_index += text_len

        for item in line:
            # split the item by the emoji and strip out any empty strings
            original_entries = item.split("➡️ ")
            entries = [entry for entry in original_entries if entry.strip()]

            for entry in entries:
                # Regular expression to match the entry pattern
                pattern = r"\[(?P<date>.+?) ET\]\((?P<url>.+?)\) (?P<name>.+?): (?P<message>.+)$"

                # Use re.DOTALL to make '.' match newline characters as well. This is needed for multi-line messages
                match = re.match(pattern, entry, re.DOTALL)

                if match:
                    # Extract components from the match object
                    date = match.group("date") + " ET"
                    url = match.group("url")
                    name = match.group("name")
                    message = match.group(
                        "message"
                    ).strip()  # Remove leading/trailing whitespace

                    # Construct the text to be inserted with the date as a link
                    text_to_insert = f" ➡️ {date} {name}: {message}\n"
                    text_len = len(text_to_insert)
                    inserted_content += text_to_insert

                    # Insert text request
                    requests.append(
                        {
                            "insertText": {
                                "location": {"index": insert_index},
                                "text": text_to_insert,
                            }
                        }
                    )
                    # Update link style for date_text: the +4 offset skips
                    # the " ➡️ " prefix (space, arrow, variation selector,
                    # space = 4 code points) so only the date is linked.
                    requests.append(
                        {
                            "updateTextStyle": {
                                "range": {
                                    "startIndex": insert_index + 4,
                                    "endIndex": insert_index + len(date) + 4,
                                },
                                "textStyle": {"link": {"url": url}},
                                "fields": "link",
                            }
                        }
                    )
                    # Update for next insertion
                    insert_index += text_len
                else:
                    # if we don't match the above pattern, just insert the entry as is
                    # NOTE(review): this inserts `item` (the whole split
                    # segment), not `entry` — confirm that is intended when a
                    # segment contains several entries.
                    text_to_insert = f" ➡️ {item}\n"
                    inserted_content += text_to_insert
                    text_len = len(text_to_insert)
                    # Insert text request for the entire block of formatted_content
                    requests.append(
                        {
                            "insertText": {
                                "location": {"index": insert_index},
                                "text": text_to_insert,
                            }
                        }
                    )

                    # Update insert_index as needed, assuming formatted_content is a single block of text
                    insert_index += text_len

        # Make sure that we do normal formatting for the inserted content
        requests.append(
            {
                "updateParagraphStyle": {
                    "range": {
                        "startIndex": start_index,
                        "endIndex": (start_index + len(inserted_content)),
                    },
                    "paragraphStyle": {"namedStyleType": "NORMAL_TEXT"},
                    "fields": "namedStyleType",
                }
            }
        )
        service.documents().batchUpdate(
            documentId=doc_id, body={"requests": requests}
        ).execute()
    else:
        logging.warning("Headings not found")


def close_incident_document(file_id):
    """Update the incident document's status line to "Closed".

    Issues a single batchUpdate that replaces any recognized
    "Status: <state>" string with "Status: Closed".

    Args:
        file_id: ID of the Google Docs incident document.

    Returns:
        The batchUpdate API response.
    """
    # List of possible statuses to be replaced
    possible_statuses = ["In Progress", "Open", "Ready to be Reviewed", "Reviewed"]

    # Replace all possible statuses with "Closed".  matchCase must be a real
    # JSON boolean: the previous value was the STRING "false", which is not a
    # valid boolean for the Docs API's SubstringMatchCriteria.matchCase field.
    changes = {
        "requests": [
            {
                "replaceAllText": {
                    "containsText": {"text": f"Status: {status}", "matchCase": False},
                    "replaceText": "Status: Closed",
                }
            }
            for status in possible_statuses
        ]
    }
    # Execute the batchUpdate request
    service = get_google_service("docs", "v1")
    result = (
        service.documents()
        .batchUpdate(
            documentId=file_id,
            body=changes,
        )
        .execute()
    )
    return result


def update_incident_list(document_link, name, slug, product, channel_url):
service = get_google_service("sheets", "v4")
list = [
Expand Down Expand Up @@ -540,36 +256,6 @@ def update_incident_list(document_link, name, slug, product, channel_url):
return result


def update_spreadsheet_close_incident(channel_name):
    """Mark the incident-list row matching *channel_name* as Closed.

    Scans Sheet1 of the incident list spreadsheet for the first row
    containing the channel name and writes "Closed" into its column D.

    Args:
        channel_name: incident Slack channel name to look up.

    Returns:
        True when a matching row was found and updated, False otherwise.
    """
    service = get_google_service("sheets", "v4")
    sheet_name = "Sheet1"
    response = (
        service.spreadsheets()
        .values()
        .get(spreadsheetId=INCIDENT_LIST, range=sheet_name)
        .execute()
    )
    rows = response.get("values", [])
    # Sheets rows are 1-indexed, so enumerate from 1.
    for row_number, row in enumerate(rows, start=1):
        if channel_name not in row:
            continue
        # Column D of the matching row holds the status.
        target_range = f"{sheet_name}!D{row_number}"
        service.spreadsheets().values().update(
            spreadsheetId=INCIDENT_LIST,
            range=target_range,
            valueInputOption="USER_ENTERED",
            body={"values": [["Closed"]]},
        ).execute()
        return True
    return False


def healthcheck():
"""Check if the bot can interact with Google Drive."""
healthy = False
Expand Down
2 changes: 1 addition & 1 deletion app/integrations/google_workspace/google_docs.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ def batch_update(document_id: str, requests: list) -> dict:


@handle_google_api_errors
def get(document_id: str) -> dict:
def get_document(document_id: str) -> dict:
"""Gets a document from Google Docs.
Args:
Expand Down
Loading

0 comments on commit 6f19015

Please sign in to comment.