
Commit

solve linting issues and fix test
Friendly-Banana authored and jakobkoerber committed Sep 20, 2024
1 parent fc93c11 commit 4114cc3
Showing 2 changed files with 37 additions and 32 deletions.
59 changes: 30 additions & 29 deletions src/menu_parser.py
@@ -29,7 +29,7 @@ class MenuParser(ABC):
"""

canteens: Set[Canteen]
- _label_lookup: Dict[str, Set[Label]]
+ _label_subclasses: Dict[str, Set[Label]]
# we use datetime %u, so we go from 1-7
weekday_positions: Dict[str, int] = {"mon": 1, "tue": 2, "wed": 3, "thu": 4, "fri": 5, "sat": 6, "sun": 7}

@@ -54,7 +54,7 @@ def _parse_label(cls, labels_str: str) -> Set[Label]:
for value in split_values:
stripped = value.strip()
if not stripped.isspace():
- labels |= cls._label_lookup.get(stripped, set())
+ labels |= cls._label_subclasses.get(stripped, set())
Label.add_supertype_labels(labels)
return labels
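
Note on the hunk above: _label_lookup becomes _label_subclasses, and _parse_label unions the label sets found in that mapping for every marker in the comma-separated string before adding supertype labels. A minimal sketch of the same lookup pattern, with plain strings standing in for the project's Label enum and the supertype step folded into the mapping (both simplifications are assumptions made for illustration):

```python
from typing import Dict, Set

# Stand-in for the real _label_subclasses mapping; keys and values are invented.
_label_subclasses: Dict[str, Set[str]] = {
    "a": {"GLUTEN"},
    "aW": {"WHEAT", "GLUTEN"},  # supertype folded in for brevity
    "1": {"DYESTUFF"},
}


def parse_labels(labels_str: str) -> Set[str]:
    """Union the label sets of every known marker in a comma-separated string."""
    labels: Set[str] = set()
    for value in labels_str.split(","):
        stripped = value.strip()
        if stripped:  # skip empty fragments
            labels |= _label_subclasses.get(stripped, set())
    return labels


print(parse_labels("a, aW, 99"))  # the unknown marker "99" is ignored
```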

@@ -112,7 +112,7 @@ def __init__(self, students: float, staff: float, guests: float):
self.guests = guests
self.unit = "100g"

- _label_lookup: Dict[str, Set[Label]] = {
+ _label_subclasses: Dict[str, Set[Label]] = {
"GQB": {Label.BAVARIA},
"MSC": {Label.MSC},
"1": {Label.DYESTUFF},
@@ -192,8 +192,8 @@ def __init__(self, students: float, staff: float, guests: float):

@staticmethod
def __get_self_service_prices(
- base_price_type: SelfServiceBasePriceType,
- price_per_unit_type: SelfServicePricePerUnitType,
+ base_price_type: SelfServiceBasePriceType,
+ price_per_unit_type: SelfServicePricePerUnitType,
) -> Prices:
students: Price = Price(
base_price_type.price[0],
@@ -241,7 +241,7 @@ def __get_price(canteen: Canteen, dish: Tuple[str, str, str, str, str], dish_nam
base_price_type = StudentenwerkMenuParser.SelfServiceBasePriceType.PIZZA_VEGIE
return StudentenwerkMenuParser.__get_self_service_prices(base_price_type, price_per_unit_type)

- base_url: str = "http://www.studierendenwerk-muenchen-oberbayern.de/mensa/speiseplan/speiseplan_{url_id}_-de.html"
+ base_url: str = "https://www.studierendenwerk-muenchen-oberbayern.de/mensa/speiseplan/speiseplan_{url_id}_-de.html"

def parse(self, canteen: Canteen) -> Optional[Dict[datetime.date, Menu]]:
menus = {}
@@ -250,8 +250,10 @@ def parse(self, canteen: Canteen) -> Optional[Dict[datetime.date, Menu]]:
if page.ok:
try:
tree: html.Element = html.fromstring(page.content)
- html_menus: List[html.Element] = self.__get_daily_menus_as_html(tree)
+ html_menus: List[html.Element] = self.get_daily_menus_as_html(tree)
for html_menu in html_menus:
+ # this solves some weird reference? issue where tree.xpath will subsequently always use
+ # the first element despite looping through seemingly separate elements
html_menu = html.fromstring(html.tostring(html_menu))
menu = self.get_menu(html_menu, canteen)
if menu:
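
The comment added above describes an lxml behaviour worth spelling out: an absolute XPath such as //div[...] evaluated on an element still searches the element's whole parent document, so querying each c-schedule__item subtree in place keeps returning matches from the full page. Re-serialising the subtree with html.tostring and re-parsing it with html.fromstring turns it into an independent document, which is what the workaround does. A small standalone illustration with made-up markup, not the Studentenwerk page:

```python
from lxml import html

page = html.fromstring(
    "<div>"
    "<div class='item'><strong>Mon</strong></div>"
    "<div class='item'><strong>Tue</strong></div>"
    "</div>"
)
items = page.xpath("//div[@class='item']")

# The absolute XPath still searches the whole original document,
# so both subtrees report the first day's text here.
print([item.xpath("//strong/text()")[0] for item in items])  # ['Mon', 'Mon']

# Re-parsing each subtree makes it a standalone document, so the same
# query is now scoped to that single day's markup.
reparsed = [html.fromstring(html.tostring(item)) for item in items]
print([item.xpath("//strong/text()")[0] for item in reparsed])  # ['Mon', 'Tue']
```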
@@ -263,27 +265,27 @@
return menus

def get_menu(self, page: html.Element, canteen: Canteen) -> Optional[Menu]:
# extract date
date = self.extract_date_from_html(page)
# parse dishes of current menu
dishes: List[Dish] = self.__parse_dishes(page, canteen)
# create menu object
menu: Menu = Menu(date, dishes)
return menu

- # public for testing
- def extract_date_from_html(self, tree: html.Element) -> Optional[datetime.date]:
+ @staticmethod
+ def extract_date_from_html(tree: html.Element) -> Optional[datetime.date]:
date_str: str = tree.xpath("//div[@class='c-schedule__item']//strong/text()")[0]
try:
date: datetime.date = util.parse_date(date_str)
return date
except ValueError:
- print(f"Warning: Error during parsing date from html page. Problematic date: {date_str}")
+ warn(f"Error during parsing date from html page. Problematic date: {date_str}")
return None

# public for testing
@staticmethod
- def __get_daily_menus_as_html(tree: html.Element) -> List[html.Element]:
+ def get_daily_menus_as_html(tree: html.Element) -> List[html.Element]:
# obtain all daily menus found in the passed html page by xpath query
- daily_menus: List[html.Element] = tree.xpath("//div[@class='c-schedule__item']") # type: ignore
+ daily_menus: List[html.Element] = tree.xpath("//div[@class='c-schedule__item']")
return daily_menus
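
Several hunks in this file also swap print(f"Warning: ...") for warn(...), presumably Python's warnings.warn; the import is outside the visible diff, so that is an assumption. Unlike a bare print, a warning carries a category, reports the emitting location, and can be filtered or escalated to an error in tests. A minimal sketch with an invented date format standing in for util.parse_date:

```python
import datetime
import warnings


def parse_date_or_none(date_str: str):
    """Return a parsed date or None, emitting a warning instead of printing."""
    try:
        # stand-in for the project's util.parse_date; the format is an assumption
        return datetime.datetime.strptime(date_str, "%d.%m.%Y").date()
    except ValueError:
        warnings.warn(f"Error during parsing date from html page. Problematic date: {date_str}")
        return None


print(parse_date_or_none("24.12.2024"))   # 2024-12-24
print(parse_date_or_none("not a date"))   # emits a UserWarning and returns None
# In tests, warnings can be escalated: warnings.simplefilter("error")
```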

@staticmethod
@@ -328,12 +330,12 @@ def __parse_dishes(menu_html: html.Element, canteen: Canteen) -> List[Dish]:
dish_markers_meetless,
)
for (
- dish_name,
- dish_type,
- dish_marker_additional,
- dish_marker_allergen,
- dish_marker_type,
- dish_marker_meetless,
+ dish_name,
+ dish_type,
+ dish_marker_additional,
+ dish_marker_allergen,
+ dish_marker_type,
+ dish_marker_meetless,
) in dishes_tup:
dishes_dict[dish_name] = (
dish_type,
@@ -389,8 +391,7 @@ class DishType(Enum):
VEGETARIAN = auto()
VEGAN = auto()

- # if an label is a subclass of another label,
- _label_lookup: Dict[str, Set[Label]] = {
+ _label_subclasses: Dict[str, Set[Label]] = {
"a": {Label.GLUTEN},
"aW": {Label.WHEAT},
"aR": {Label.RYE},
@@ -535,7 +536,7 @@ def __get_label_str_and_price(self, column_index: int, line: str) -> Optional[Tu
# However, according to
# https://black.readthedocs.io/en/stable/faq.html#why-are-flake8-s-e203-and-w503-violated,
# this is against PEP8
- line[estimated_column_end - delta: min(estimated_column_end + delta, len(line))], # noqa: E203
+ line[estimated_column_end - delta : min(estimated_column_end + delta, len(line))], # noqa: E203
)[0]
except IndexError:
return None
@@ -547,7 +548,7 @@ def __get_label_str_and_price(self, column_index: int, line: str) -> Optional[Tu
# However, according to
# https://black.readthedocs.io/en/stable/faq.html#why-are-flake8-s-e203-and-w503-violated,
# this is against PEP8
- line[max(estimated_column_begin - delta, 0): estimated_column_begin + delta], # noqa: E203
+ line[max(estimated_column_begin - delta, 0) : estimated_column_begin + delta], # noqa: E203
)[0]
except IndexError:
labels_str = ""
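
The two slice changes above are formatting only: Black puts spaces around the slice colon when the bounds are expressions, which flake8 reports as E203 (whitespace before ':'), hence the per-line noqa comments. A tiny self-contained example of the same pattern with invented values:

```python
line = "  Mo 12.02.  Di 13.02.  Mi 14.02."
estimated_column_end = 12
delta = 3

# Black's preferred spacing around the slice colon when the bounds are
# expressions; flake8's E203 ("whitespace before ':'") disagrees, so the
# project silences it per line instead of changing the formatter.
window = line[estimated_column_end - delta : min(estimated_column_end + delta, len(line))]  # noqa: E203
print(repr(window))  # '2.  Di'
```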
@@ -676,7 +677,7 @@ def get_menus(self, text, year, week_number):
positions4 = [
(max(a.start() - 3, 0), a.end())
for a in list(re.finditer(self.split_days_regex_closed, soup_line1))
- + list(re.finditer(self.split_days_regex_closed, soup_line2))
+ + list(re.finditer(self.split_days_regex_closed, soup_line2))
]

if positions3: # Two lines "Tagessuppe siehe Aushang"
Expand All @@ -702,7 +703,7 @@ def get_menus(self, text, year, week_number):
lines_weekdays = {"mon": "", "tue": "", "wed": "", "thu": "", "fri": ""}
# it must be lines[3:] instead of lines[2:] or else the menus would start with "Preis ab 0,90€" (from the
# soups) instead of the first menu, if there is a day where the bistro is closed.
- for line in lines[soup_line_index + 3:]: # noqa: E203
+ for line in lines[soup_line_index + 3 :]: # noqa: E203
lines_weekdays["mon"] += " " + line[pos_mon:pos_tue].replace("\n", " ")
lines_weekdays["tue"] += " " + line[pos_tue:pos_wed].replace("\n", " ")
lines_weekdays["wed"] += " " + line[pos_wed:pos_thu].replace("\n", " ")
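
For context on the loop above: each text line of the PDF export is cut into per-weekday columns by character position, and the fragments are concatenated day by day. A toy version of that column slicing with made-up positions and menu text; the real parser derives the positions from regex matches on the weekday header line:

```python
# Toy example of per-weekday column slicing; the column positions are invented
# here, while the parser derives them from regex matches on the header line.
pos_mon, pos_tue, pos_wed = 0, 15, 30
lines = [
    "Suppe A".ljust(15) + "Suppe B".ljust(15) + "Suppe C",
    "Schnitzel".ljust(15) + "Pasta".ljust(15) + "Auflauf",
]

lines_weekdays = {"mon": "", "tue": "", "wed": ""}
for line in lines:
    lines_weekdays["mon"] += " " + line[pos_mon:pos_tue].replace("\n", " ")
    lines_weekdays["tue"] += " " + line[pos_tue:pos_wed].replace("\n", " ")
    lines_weekdays["wed"] += " " + line[pos_wed:].replace("\n", " ")

print({day: text.split() for day, text in lines_weekdays.items()})
# {'mon': ['Suppe', 'A', 'Schnitzel'], 'tue': ['Suppe', 'B', 'Pasta'], 'wed': ['Suppe', 'C', 'Auflauf']}
```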
@@ -743,7 +744,7 @@ def get_menus(self, text, year, week_number):
try:
price_obj = Price(float(price_str))
except ValueError:
- print(f"Warning: Error during parsing price: {price_str}")
+ warn(f"Error during parsing price: {price_str}")
dishes.append(
Dish(
dish_name.strip(),
@@ -770,7 +771,7 @@ class MedizinerMensaMenuParser(MenuParser):
labels_regex = r"(\s([A-C]|[E-H]|[K-P]|[R-Z]|[1-9])(,([A-C]|[E-H]|[K-P]|[R-Z]|[1-9]))*(\s|\Z))"
price_regex = r"(\d+(,(\d){2})\s?€)"

- _label_lookup: Dict[str, Set[Label]] = {
+ _label_subclasses: Dict[str, Set[Label]] = {
"1": {Label.DYESTUFF},
"2": {Label.PRESERVATIVES},
"3": {Label.ANTIOXIDANTS},
@@ -970,7 +971,7 @@ class StraubingMensaMenuParser(MenuParser):
url = "https://www.stwno.de/infomax/daten-extern/csv/HS-SR/{calendar_week}.csv"
canteens = {Canteen.MENSA_STRAUBING}

- _label_lookup: Dict[str, Set[Label]] = {
+ _label_subclasses: Dict[str, Set[Label]] = {
"1": {Label.DYESTUFF},
"2": {Label.PRESERVATIVES},
"3": {Label.ANTIOXIDANTS},
10 changes: 7 additions & 3 deletions src/test/test_menu_parser.py
@@ -4,7 +4,7 @@
import tempfile
import unittest
from datetime import date
- from typing import Dict, List
+ from typing import Dict

from lxml import html # nosec: https://github.com/TUM-Dev/eat-api/issues/19

@@ -58,11 +58,15 @@ def test_get_all_dates(self) -> None:
working_days.append(start_date)
start_date += datetime.timedelta(days=1)

+ dates = []
tree = file_util.load_html(
f"{self.base_path_canteen.format(canteen=Canteen.MENSA_GARCHING.canteen_id)}"
f"/for-generation/overview.html",
)
- dates: List[date] = self.studentenwerk_menu_parser.get_available_dates_for_html(tree)
+ menus = StudentenwerkMenuParser.get_daily_menus_as_html(tree)
+ for menu in menus:
+ html_menu = html.fromstring(html.tostring(menu))
+ dates.append(self.studentenwerk_menu_parser.extract_date_from_html(html_menu))
self.assertEqual(dates, working_days)
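
The reworked test mirrors the parser change: it splits the overview page into daily menus, re-parses each one, and extracts its date. A self-contained variant of that flow, using inline markup and an assumed German date format instead of the repository's fixture files:

```python
import datetime
import unittest

from lxml import html


def extract_dates(tree: html.HtmlElement) -> list:
    """Simplified stand-in for get_daily_menus_as_html plus extract_date_from_html."""
    dates = []
    for menu in tree.xpath("//div[@class='c-schedule__item']"):
        # re-parse so the absolute XPath below is scoped to this day's subtree
        menu = html.fromstring(html.tostring(menu))
        date_str = menu.xpath("//strong/text()")[0]
        dates.append(datetime.datetime.strptime(date_str, "%d.%m.%Y").date())
    return dates


class ExtractDatesTest(unittest.TestCase):
    def test_extract_dates(self) -> None:
        tree = html.fromstring(
            "<div>"
            "<div class='c-schedule__item'><strong>23.09.2024</strong></div>"
            "<div class='c-schedule__item'><strong>24.09.2024</strong></div>"
            "</div>"
        )
        self.assertEqual(
            extract_dates(tree),
            [datetime.date(2024, 9, 23), datetime.date(2024, 9, 24)],
        )


if __name__ == "__main__":
    unittest.main()
```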

def test_studentenwerk(self) -> None:
@@ -92,7 +96,7 @@ def __get_menus(self, canteen: Canteen) -> Dict[date, Menu]:
f"{self.base_path_canteen.format(canteen=canteen.canteen_id)}/for-generation/{date_}.html",
)
studentenwerk_menu_parser = StudentenwerkMenuParser()
- menu = studentenwerk_menu_parser.get_menu(tree, canteen, date_)
+ menu = studentenwerk_menu_parser.get_menu(tree, canteen)
if menu is not None:
menus[date_] = menu
return menus
