Bornovolokova Olga AT-18 #41

Open · wants to merge 6 commits into main
1.py (new file): 12 additions, 0 deletions
@@ -0,0 +1,12 @@
import xml.etree.ElementTree as ET
from json import dump
from urllib.request import urlopen

# Fetch the RSS feed and parse it into an ElementTree.
data = urlopen('https://lenta.ru/rss').read().decode('utf8')
root = ET.fromstring(data)

# Keep only the publication date and title of every news item.
response_arr = [{'pubDate': item.find('pubDate').text, 'title': item.find('title').text}
                for item in root.findall('channel/item')]

with open("news.json", "w", encoding='utf8') as news:
    dump(response_arr, news, indent=1, ensure_ascii=False)
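
A quick sanity check of the resulting news.json (a minimal sketch, not part of the PR; it assumes 1.py has already run):

# Sketch: read news.json back and show the first entry.
import json

with open('news.json', encoding='utf8') as f:
    items = json.load(f)
print(len(items), 'items; first:', items[0]['pubDate'], items[0]['title'])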
2.py (new file): 19 additions, 0 deletions
@@ -0,0 +1,19 @@

import xml.etree.ElementTree as ET
from urllib.request import urlopen
import json

# Fetch and parse the RSS feed.
data = urlopen('https://lenta.ru/rss').read().decode('utf8')
root = ET.fromstring(data)
channelElement = root.find('channel')  # safer than root[0], which assumes <channel> is the first child

# Collect every child tag of each news item into a dict.
newsTitleDate = []
for i in channelElement.findall('item'):
    elements = {}
    for element in i:
        elements[element.tag] = element.text
    newsTitleDate.append(elements)

newsJson = json.dumps(newsTitleDate, ensure_ascii=False, indent=2).encode('utf8')
with open('news2.json', 'wb') as n:
    n.write(newsJson)
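
Unlike 1.py, this version keeps every child tag of each <item> (link, description, and so on). If only the title and date are wanted, a filtering variant could look like this (a sketch reusing the names above; WANTED is a hypothetical helper set, not part of the PR):

# Sketch: keep only selected tags per item.
WANTED = {'title', 'pubDate'}
filtered = [{el.tag: el.text for el in item if el.tag in WANTED}
            for item in channelElement.findall('item')]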
Belmondo.py (new file): 14 additions, 0 deletions
@@ -0,0 +1,14 @@
from itertools import groupby
from json import loads
from urllib.request import urlopen

# Fetch up to 500 revisions of the Russian Wikipedia article on Jean-Paul Belmondo.
url = 'https://ru.wikipedia.org/w/api.php?action=query&format=json&prop=revisions&rvlimit=500&titles=%D0%91%D0%B5%D0%BB%D1%8C%D0%BC%D0%BE%D0%BD%D0%B4%D0%BE,_%D0%96%D0%B0%D0%BD-%D0%9F%D0%BE%D0%BB%D1%8C'
data = loads(urlopen(url).read().decode('utf8'))

# Group consecutive revisions by date; the API returns revisions in order, so groupby works without sorting.
data_belmondo = groupby([i['timestamp'][:10] for i in data['query']['pages']['192203']['revisions']])
for d, e in data_belmondo:
    print(d, len(list(e)))

# A sharp spike in edits occurred on 2021-09-06, the day Belmondo died.

# The largest number of edits coincided with the date of death, but this method is not
# 100% reliable: any other newsworthy event involving the person can cause a similar
# spike, so the date could be identified incorrectly.
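
The visual scan described in the comment above can be automated. A minimal sketch (not part of the PR) that picks the busiest date from the same data:

# Sketch: find the date with the most revisions instead of reading the printed list.
from collections import Counter

dates = [i['timestamp'][:10] for i in data['query']['pages']['192203']['revisions']]
peak_date, peak_count = Counter(dates).most_common(1)[0]
print(peak_date, peak_count)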
Gradskiy.py (new file): 12 additions, 0 deletions
@@ -0,0 +1,12 @@
from itertools import groupby
from json import loads
from urllib.request import urlopen

# Fetch up to 500 revisions of the Russian Wikipedia article on Alexander Gradsky.
url = 'https://ru.wikipedia.org/w/api.php?action=query&format=json&prop=revisions&rvlimit=500&titles=%D0%93%D1%80%D0%B0%D0%B4%D1%81%D0%BA%D0%B8%D0%B9,_%D0%90%D0%BB%D0%B5%D0%BA%D1%81%D0%B0%D0%BD%D0%B4%D1%80_%D0%91%D0%BE%D1%80%D0%B8%D1%81%D0%BE%D0%B2%D0%B8%D1%87'
data = loads(urlopen(url).read().decode('utf8'))

# Group consecutive revisions by date, as in Belmondo.py.
data_Gradskiy = groupby([i['timestamp'][:10] for i in data['query']['pages']['183903']['revisions']])
for d, e in data_Gradskiy:
    print(d, len(list(e)))

# A sharp spike in edits occurred on 2021-11-28, the day Gradsky died.
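
rvlimit=500 is the MediaWiki API's per-request cap, so articles with longer histories are truncated here. A hedged sketch of the standard continuation loop (the continue/rvcontinue mechanism is documented MediaWiki API behaviour; the parameter assembly below is illustrative, not the PR's code):

# Sketch: follow API continuation to collect more than 500 revision dates.
from json import loads
from urllib.parse import urlencode
from urllib.request import urlopen

base = 'https://ru.wikipedia.org/w/api.php'
params = {'action': 'query', 'format': 'json', 'prop': 'revisions',
          'rvlimit': 500, 'titles': 'Градский, Александр Борисович'}

dates = []
while True:
    page = loads(urlopen(base + '?' + urlencode(params)).read().decode('utf8'))
    for p in page['query']['pages'].values():
        dates += [r['timestamp'][:10] for r in p.get('revisions', [])]
    if 'continue' not in page:
        break
    params.update(page['continue'])  # carries rvcontinue for the next batch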