diff --git a/.gitignore b/.gitignore
index 5e73cfd20a..087f3ab5a4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,7 +11,11 @@ src*
build/*
dist/*
tmp/*
+
*.zip
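+# re-include zip fixtures needed by the test suite (otherwise excluded by *.zip above)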
+!doajtest/unit/resources/*.zip
+!doajtest/preservation_upload_test_package/*.zip
+
scratch.py
.coverage
htmlcov/*
diff --git a/cms/assets/img/sponsors/Degruyter.svg b/cms/assets/img/sponsors/Degruyter.svg
new file mode 100644
index 0000000000..4fcff995f5
--- /dev/null
+++ b/cms/assets/img/sponsors/Degruyter.svg
@@ -0,0 +1,3 @@
+
diff --git a/cms/assets/img/ambassadors/johndove.jpg b/cms/assets/img/team/johndove.jpg
similarity index 100%
rename from cms/assets/img/ambassadors/johndove.jpg
rename to cms/assets/img/team/johndove.jpg
diff --git a/cms/data/ambassadors.yml b/cms/data/ambassadors.yml
index 3f797bcd2d..f2a949b4f0 100644
--- a/cms/data/ambassadors.yml
+++ b/cms/data/ambassadors.yml
@@ -62,13 +62,6 @@
photo: "ivonne.jpg"
coi:
2022: https://drive.google.com/file/d/1HnGhYbvbzL34guWOmIqcthcwAN8NADX1/view?usp=sharing
-
-- name: John G. Dove
- region: North America
- bio: "John has had a career in executive management, and is now an independent consultant and open access advocate who works with organisations seeking to accelerate their transition to open access. He advises both for-profits and non-profits, and has a particular interest in identifying the steps necessary to flip an entire discipline’s scholarly record to open access. His ambassador activities focus on increasing the support to DOAJ from the community. He served for six years on NISO’s Information Discovery and Interchange Topic Committee, and has written for Learned Publishing, Against the Grain, and Scholarly Kitchen. John serves on the Board of Trustees of his local public library in Revere, Massachusetts. He has a B.A. in Mathematics from Oberlin College."
- photo: "johndove.jpg"
- coi:
- 2022: https://drive.google.com/file/d/1cWijl2xdmVjshsvaGTABOvC_chIIfuVA/view?usp=sharing
- name: Mahmoud Khalifa
region: Middle East and Persian Gulf
diff --git a/cms/data/nav.yml b/cms/data/nav.yml
index 8c5aa278ee..e8ec84fc22 100644
--- a/cms/data/nav.yml
+++ b/cms/data/nav.yml
@@ -56,11 +56,11 @@ entries:
secondary_mobile: true
route: doaj.support # ~~->Support:WebRoute~~
entries:
- - label: Support DOAJ
+ - label: Institutions and libraries
route: doaj.support # ~~->Support:WebRoute~~
- - label: Publisher supporters
+ - label: Publishers
route: doaj.publisher_supporters # ~~->PublisherSupporters:WebRoute~~
- - label: Supporters
+ - label: Institutional and library supporters
route: doaj.supporters # ~~->Supporters:WebRoute~~
- id: apply
label: Apply
diff --git a/cms/data/sponsors.yml b/cms/data/sponsors.yml
index 379c51cf69..62f4700a39 100644
--- a/cms/data/sponsors.yml
+++ b/cms/data/sponsors.yml
@@ -1,17 +1,14 @@
-# List of sponsors separated by tiers (premier, sustaining, basic)
+# Flat list of sponsors (tier grouping no longer used)
# ~~Sponsors:Data~~
-gold:
- name: Royal Society of Chemistry
url: https://www.rsc.org/
logo: rsc.png
-
+
- name: Georg Thieme Verlag KG
url: https://www.thieme.com/
logo: thieme.svg
-
-silver:
- name: AOSIS
url: https://aosis.co.za/
logo: aosis.png
@@ -19,7 +16,7 @@ silver:
- name: Cappelen Damm Akademisk
url: https://www.cappelendammundervisning.no/
logo: cda.jpg
-
+
- name: Copernicus Publications
url: https://publications.copernicus.org
logo: copernicus.svg
@@ -31,7 +28,7 @@ silver:
- name: Frontiers
url: https://www.frontiersin.org/
logo: frontiers.svg
-
+
- name: Knowledge E
url: https://knowledgee.com/
logo: knowledgee.png
@@ -43,7 +40,7 @@ silver:
- name: OA.Works
url: https://oa.works/
logo: oaworks.png
-
+
- name: SAGE Publishing
url: https://www.sagepublications.com/
logo: sage.svg
@@ -51,21 +48,19 @@ silver:
- name: Taylor & Francis Group
url: https://www.taylorandfrancisgroup.com/
logo: tf.svg
-
+
- name: John Wiley and Sons LTD
url: https://www.wiley.com/en-us
logo: Wiley_Wordmark_black.png
-
-bronze:
- name: American Chemical Society
url: https://pubs.acs.org/
logo: acs.jpg
-
+
- name: American Psychological Association
url: https://www.apa.org/pubs
logo: apa.png
-
+
- name: Cambridge University Press
url: https://www.cambridge.org/
logo: cambridge.svg
@@ -74,6 +69,10 @@ bronze:
url: https://www.digital-science.com/
logo: ds.svg
+- name: De Gruyter
+ url: https://www.degruyter.com/
+ logo: Degruyter.svg
+
- name: eLife Sciences Publications
url: https://elifesciences.org/
logo: elife.svg
@@ -81,7 +80,7 @@ bronze:
- name: Elsevier
url: https://www.elsevier.com/
logo: elsevier.svg
-
+
- name: Emerald Publishing
url: https://emeraldpublishing.com/
logo: emerald.svg
@@ -89,15 +88,15 @@ bronze:
- name: IEEE
url: https://www.ieee.org/
logo: ieee.png
-
+
- name: Institute of Physics
url: https://www.iop.org/
logo: iop.jpg
-
+
- name: International Union for Conservation of Nature
url: https://iucn.org/
logo: IUCN.svg
-
+
- name: JMIR Publications
url: https://jmirpublications.com/
logo: jmir.svg
@@ -129,12 +128,11 @@ bronze:
- name: SciFree
url: https://scifree.se/
logo: scifree.svg
-
+
- name: The IET
url: https://www.theiet.org/
logo: iet.svg
- name: Ubiquity Press
url: https://www.ubiquitypress.com/
- logo: ubiquity.svg
-
+ logo: ubiquity.svg
diff --git a/cms/data/team.yml b/cms/data/team.yml
index ce175c0667..e3a0b3df8e 100644
--- a/cms/data/team.yml
+++ b/cms/data/team.yml
@@ -6,17 +6,15 @@
photo: alejandra.png
bio: 'Alejandra has a Bachelor’s degree in Information Science and a Master’s degree in Digital Media. She has around ten years of experience in information management, knowledge management and scholarly communication at national and international level.'
coi:
- 2016: https://drive.google.com/file/d/0ByRf6PVViI-mVFhjdkZWaFJZOTQ/view?usp=sharing&resourcekey=0-gMdxxHXyyJB9zFZIuh99QQ
2018: https://drive.google.com/file/d/0ByRf6PVViI-mRlRYTDBPRlZiWTRxQ3VMTUZpQnZ5ZkwyLVQ4/view?usp=sharing&resourcekey=0-mlQ6rSCEnr6RfpCwh_4SMw
2020: https://drive.google.com/file/d/1PF7Cc9vGAwWGqpqDo7nRULjxWIF2NR_Q/view?usp=sharing
2022: https://drive.google.com/file/d/1E45ycyctDfYkh65ZCM8PMtQsYym_eP6L/view?usp=sharing
- name: Cenyu Shen
- role: Quality Team Lead and Managing Editor
+ role: Deputy Head of Editorial (Quality)
photo: cenyu.jpg
bio: 'Cenyu holds a PhD in Information Systems Science at Hanken School of Economics in Finland. She has spent around seven years on Open Access research with a particular focus on gold OA publishing concerning journals and publishers outside the mainstream. She was one of three DOAJ ambassadors for China from 2016 to 2017.'
coi:
- 2016: https://drive.google.com/file/d/0B0fPCpIPjZlmNHZCQmxpUmN6bUEtYUx2VHZnbjVySS1fRTlr/view?usp=sharing&resourcekey=0-1TRIV1MEQMhdGbmCd7CbOA
2020: https://drive.google.com/file/d/1rm9fjOF3OHJ9lR9wEUyQBQTO2KdoNQcE/view?usp=sharing
2022: https://drive.google.com/file/d/1Mn_CR0twKxyFbbHxsLSrgeU984BNOLlS/view?usp=sharing
@@ -25,7 +23,6 @@
photo: clara.jpg
- bio: 'Clara has 10 years experience in the scholarly publishing industry. She worked at Cambridge University Press as an Open Access Project Manager until 2015. She also works in science communication as a freelancer at the University Pompeu Fabra, Barcelona. Clara speaks Spanish, Catalan, English and some German and French. She loves cetaceans, freediving, cycling, and is an enthusiastic cook.'
+ bio: 'Clara has 10 years of experience in the scholarly publishing industry. She worked at Cambridge University Press as an Open Access Project Manager until 2015. She also works in science communication as a freelancer at the University Pompeu Fabra, Barcelona. Clara speaks Spanish, Catalan, English and some German and French. She loves cetaceans, freediving, cycling, and is an enthusiastic cook.'
coi:
- 2016: https://drive.google.com/file/d/0ByRf6PVViI-mbDFybndLbldEbFE/view?usp=sharing&resourcekey=0-lKZNFwvUNdVAGKatvnKiPg
2018: https://drive.google.com/file/d/1LHmZSZ6bwf6U71fNvIibJa6R1lquNhfR/view?usp=sharing
2020: https://drive.google.com/file/d/1v4duxnoTcNo4UbL_GBa5D1T8JtTl7oY1/view?usp=sharing
2022: https://drive.google.com/file/d/1hevYxG1102llDy-_i-onwKbDuOlBguA_/view?usp=sharing
@@ -33,9 +30,8 @@
- name: Dominic Mitchell
role: Operations Manager
photo: dominic.jpg
- bio: 'Dominic has over 25 years experience working with publisher and library communities. He is responsible for operations and development of the DOAJ platform. He acts as Committee chair for the Think. Check. Submit. initiative, of which DOAJ is a founding organisation. He represents DOAJ in Project JASPER, a cross-industry project working to ensure that journals are preserved for the long term. He also sits on the OASPA Board of Directors and serves as Secretary. Outside of work, he is reluctantly becoming an expert in the playparks of Stockholm with his twin sons.'
+ bio: 'Dominic has over 25 years of experience working with publisher and library communities. He is responsible for operations and development of the DOAJ platform. He acts as Committee chair for the Think. Check. Submit. initiative, of which DOAJ is a founding organisation. He represents DOAJ in Project JASPER, a cross-industry project working to ensure that journals are preserved for the long term. He also sits on the OASPA Board of Directors and serves as Secretary. Outside of work, he is reluctantly becoming an expert in the playparks of Stockholm with his twin sons.'
coi:
- 2016: https://drive.google.com/file/d/0ByRf6PVViI-mWmU0UHZqZm1xcDQ/view?usp=sharing&resourcekey=0-BmQKwWn6Vb9ot73Xie66aA
2018: https://drive.google.com/file/d/13XX_GUrw2xRmXARjRrTxegULPT8Redka/view?usp=sharing
2020: https://drive.google.com/file/d/1nxFOuAdXLb8A-LulhNpz9i5vSmr5DBwF/view?usp=sharing
2022: https://drive.google.com/file/d/1HBF9RLaIt3lFNG6WDcV08fQSMS_C6zwA/view?usp=sharing
@@ -43,7 +39,7 @@
- name: Gala García Reátegui
role: Managing Editor
photo: gala.jpg
- bio: 'Gala holds a Masters Degree in Information and Documentation from Lyon 3 University in France. Prior to joining DOAJ, she worked for the Digital Strategy and Data Directorate at The French National Research Agency (ANR) and for the Open Archive HAL at the Center for Direct Scientific Communication (CCSD). Gala is Peruvian but lived for more than ten years in France. Today, she is based in Denmark. She loves meeting people from other cultures, trying local dishes or experiences: currently Gala goes winter bathing in the Limfjord, Denmark! She also loves running.'
+ bio: "Gala holds a Master's Degree in Information and Documentation from Lyon 3 University in France. Prior to joining DOAJ, she worked for the Digital Strategy and Data Directorate at The French National Research Agency (ANR) and for the Open Archive HAL at the Center for Direct Scientific Communication (CCSD). Gala is Peruvian but lived for more than ten years in France. Today, she is based in Denmark. She loves meeting people from other cultures, trying local dishes or experiences. Currently Gala goes winter bathing in the Limfjord, Denmark! She also loves running."
coi:
2023: https://drive.google.com/file/d/1R7XquFauefdmtjPIsfGfAWQoAw7NLIci/view?usp=sharing
@@ -58,16 +54,22 @@
- name: Joanna Ball
role: Managing Director
photo: joba.jpg
- bio: 'Joanna has had over 25 years experience of working within research libraries in the UK and Denmark, most recently as Head of Roskilde University Library, before joining DOAJ in 2022. She has also been involved with UKSG as Chair of Insights Editorial Board and a Trustee, and is currently Vice Chair. Joanna lives with her family in Roskilde and enjoys running in her spare time.'
+ bio: 'Joanna has over 25 years of experience working within research libraries in the UK and Denmark, most recently as Head of Roskilde University Library, before joining DOAJ in 2022. She has also been involved with UKSG as Chair of Insights Editorial Board and a Trustee, and is currently Vice Chair. Joanna lives with her family in Roskilde and enjoys running in her spare time.'
coi:
2022: https://drive.google.com/file/d/1-3xzwkHMclREgLhj_XNF5n6Nr4q2_bnw/view?usp=sharing
+- name: John G. Dove
+ role: Advisor
+ photo: johndove.jpg
+ bio: "John has had a career in executive management, and is now an independent consultant and open access advocate who works with organisations seeking to accelerate their transition to open access. He advises both for-profits and non-profits, and has a particular interest in identifying the steps necessary to flip an entire discipline’s scholarly record to open access. His ambassador activities focus on increasing the support to DOAJ from the community. He served for six years on NISO’s Information Discovery and Interchange Topic Committee, and has written for Learned Publishing, Against the Grain, and Scholarly Kitchen. John serves on the Board of Trustees of his local public library in Revere, Massachusetts. He has a B.A. in Mathematics from Oberlin College."
+ coi:
+ 2022: https://drive.google.com/file/d/1cWijl2xdmVjshsvaGTABOvC_chIIfuVA/view?usp=sharing
+
- name: Judith Barnsby
- role: Senior Managing Editor
+ role: Head of Editorial
photo: judith.jpg
- bio: 'Judith has 25 years experience in the scholarly publishing industry, working for a range of non-profit society publishers and service providers before joining DOAJ. She has a keen interest in publishing standards and protocols, and has served on the board of CLOCKSS and as chair of the PALS (publisher and library solutions) working group in the UK. Judith loves books, especially detective fiction, and volunteers in her local library.'
+ bio: 'Judith has 25 years of experience in the scholarly publishing industry, working for a range of non-profit society publishers and service providers before joining DOAJ. She has a keen interest in publishing standards and protocols, and has served on the board of CLOCKSS and as chair of the PALS (publisher and library solutions) working group in the UK. Judith loves books, especially detective fiction, and volunteers in her local library.'
coi:
- 2016: https://drive.google.com/file/d/0B0fPCpIPjZlmb3JmVkFYbjN5aTh1OUhLd2lZaEV0ZlFwbTZV/view?usp=sharing&resourcekey=0-o_PXKLk5UFbPk_-4B61jVA
2018: https://drive.google.com/file/d/0ByRf6PVViI-mV2lfMjByQjYxUkpMcXhuc2l5Q3ZDWlpiYUtZ/view?usp=sharing&resourcekey=0-6eiGIRal00eXvgJUTeN_lw
2020: https://drive.google.com/file/d/18MWTsze4cDQQRPHJl2XrYgHQvlxhsPZa/view?usp=sharing
2023: https://drive.google.com/file/d/1hUsVIY09N6WceSx1edTM-h516CJGkHcu/view?usp=share_link
@@ -77,7 +79,6 @@
photo: Kamel.jpg
bio: 'Kamel is Full Professor of Chemistry at the University of Bejaia, Algeria (ORCID). He gained his PhD in Process Engineering and Chemistry of Materials Science at the University of Setif, Algeria. Kamel joined DOAJ in 2016 as an Ambassador for North Africa. He is currently Creative Commons Algeria Chapter lead, director of the Laboratory of Organic Materials at the University of Bejaia and editor-in-chief of Algerian Journal of Natural Products. His scientific activity is focused on chemistry of Natural Products, scholarly communications and new developments in academic publishing. Father of 3 daughters, he likes travelling, healthy local foods & home-made snacks.'
coi:
- 2016: https://drive.google.com/file/d/0B0fPCpIPjZlmVEN4X1Q0RDdCams1NXhveW1HQmtMYU56bDE4/view?usp=sharing&resourcekey=0-wA1CGAbjB6FAX33gCDQmrA
2018: https://drive.google.com/file/d/1JdF2kh-fLXz8kPGN_3ijDt5y9K6s0hOQ/view?usp=sharing
2020: https://drive.google.com/file/d/1iXrjwLTNBXwKD2TwrPD9ApKL7O6uZ8Z7/view?usp=sharing
2022: https://drive.google.com/file/d/1cl18h_mYnNogYs8Rk-fhBTW6WKOTC2IF/view?usp=sharing
@@ -85,7 +86,7 @@
- name: Katrine Sundsbø
role: Community Manager
photo: katrine.jpeg
- bio: 'Katrine holds a Master’s degree in Cognitive Neuroscience, and has five years of experience in the field of scholarly communications. She has been an advocate for open access and visibility of research through various working groups, projects and through gamification of scholarly communications. Though Katrine is half Danish and half Norwegian, her son is named after a Swedish singer - and her British husband suggested the name!'
+ bio: "Katrine holds a Master’s degree in Cognitive Neuroscience, and has five years of experience in the field of scholarly communications. She has been an advocate for open access and visibility of research through various working groups, projects and through gamification of scholarly communications. Though Katrine is half Danish and half Norwegian, her son is named after a Swedish singer - and her British husband suggested the name!"
coi:
2023: https://drive.google.com/file/d/1yqK-Znq62T_QR_JjtcpQl6W_Ian2Ti4F/view?usp=share_link
@@ -94,28 +95,19 @@
photo: lars.jpg
bio: 'Lars worked at Danish university libraries for two decades and was Director of Libraries at Lund University, Sweden from 2001 to 2011. He founded the DOAJ in 2003, and was Managing Director from 2013-2021. He has vast experience in change management, re-engineering of academic libraries, and development of information services for research & higher education. For two decades Lars has been a strong advocate of open access and for providing services to the open access movement. He is co-founder of OpenDOAR, the Directory of Open Access Books and Think. Check. Submit. Lars lives outside Copenhagen, and is married with 4 children and 4 grandchildren. He enjoys vegetable gardening, growing cacti and succulents, and playing internet chess.'
coi:
- 2016: https://drive.google.com/file/d/0ByRf6PVViI-mbmo2aU9NWkx5dGs/view?usp=sharing&resourcekey=0-mpdRgVU9UlFjC614-woDvg
2018: https://drive.google.com/file/d/1mm1a8nbY5MQX9loqIs2ZQuVN-73RfPuN/view?usp=sharing
2021: https://drive.google.com/file/d/1bNj5sqUsu4sRLmm_YOuh3JCSMERzQ1Ro/view?usp=sharing
2022: https://drive.google.com/file/d/1fRJtvci2_j4vad0C5N1pfqm2sHZQkFz3/view?usp=sharing
- name: Leena Shah
- role: Managing Editor and Ambassador
+ role: Deputy Head of Editorial (Workflow) and Ambassador
photo: leena.jpg
bio: "Leena joined the DOAJ team in 2016 as an Ambassador for India before becoming a Managing Editor. Prior to joining DOAJ she worked as a science librarian at Nanyang Technological University, Singapore, where she developed a keen interest in scholarly communication & open science. A recent addition to her interests is artificial intelligence in scholarly communication. Leena holds a Master’s degree in Information Studies and lives in Singapore. She loves watching sci-fi shows and is enthusiastic about travelling to new places."
coi:
- 2016: https://drive.google.com/file/d/0B0fPCpIPjZlmTHZuaEtMSDNIeUpKT2Fid19jVjVFTkRoUmdj/view?usp=sharing&resourcekey=0-KqvRVa30bQEUfqO-YA1L-g
2018: https://drive.google.com/file/d/1tifEjAIlU3txBw9DjIcRW9cZL7YG7_nU/view?usp=sharing
2020: https://drive.google.com/file/d/1zU-lLB5W54E_QUm5uto5tqB6cZl83TAJ/view?usp=sharing
2022: https://drive.google.com/file/d/19rw-naMJqHkI5T7aDIDPUkwPutBdDpDm/view?usp=sharing
-- name: Luis Montilla
- role: Managing Editor
- photo: luis.jpeg
- bio: "Luis is a marine ecologist with a passion for improving the quality of scientific publishing. After finishing his Masters in Venezuela, he spent three years in Italy completing his PhD studying marine microbial symbioses in seagrass beds. In his free time, he enjoys reading and watching movies."
- coi:
- 2023: https://drive.google.com/file/d/1IJhnV2Ht5t5jilaCAFzpuFdYk7UMOjN3/view?usp=sharing
-
- name: Mahmoud Khalifa
role: Managing Editor and Ambassador
photo: mahmoud-new.jpg
@@ -151,7 +143,6 @@
photo: Rikard.jpg
bio: 'Rikard has a Bachelor of Arts degree with a Major in Cultural Sciences and a specialization in publishing. He enjoys reading about philosophy and religion.'
coi:
- 2016: https://drive.google.com/file/d/0ByRf6PVViI-mdnJPdldOM0hUMFU/view?usp=sharing&resourcekey=0-8dJAtvm2n7vXV9NhqZYckw
2018: https://drive.google.com/file/d/1tOnW8L6TwolyLpIXwMKTITf9wGh_ukLb/view?usp=sharing
2020: https://drive.google.com/file/d/14c0RgpyD2Slzyh5s8LGvj5OwWbL4H8NX/view?usp=sharing
2023: https://drive.google.com/file/d/1HQIh1DlfhEutTWniXDGLYFVa9VxJ4OT9/view?usp=share_link
@@ -161,7 +152,6 @@
photo: sonja.jpg
bio: 'Sonja is a former Information Librarian from Lund University Library. She has a B.A. in English, Bulgarian and Russian from Lund University and specialises in applications for journals in the Slavonic languages.'
coi:
- 2016: https://drive.google.com/file/d/0ByRf6PVViI-mNUFoZWV4YnZ3bDg/view?usp=sharing&resourcekey=0-1JRid_DHRMKbgdzmVYL7NQ
2018: https://drive.google.com/file/d/1M5AGEDP79uk2olCcmVYjKCsmzL7tG2Vc/view?usp=sharing
2020: https://drive.google.com/file/d/1-4RJYScTs_zMBeD5zESNvCoIBCWTOWHR/view?usp=sharing
2022: https://drive.google.com/file/d/1soZtiW6gyVJPl7P_J60j2TL2Fqzl0QAs/view?usp=sharing
@@ -178,7 +168,6 @@
photo: tom.jpg
bio: 'Tom has a PhD in molecular microbiology and spent several years in Africa doing research on malaria, sleeping sickness and meningococcal epidemics. He has been actively advocating open access and open science since 2012 when he joined the Open Knowledge community and became a member of the DOAJ advisory board. His current research interests are development of quality systems for the assessment of scholarly journals and articles, and research in the area of soil microbiology in relation to soil health and human health.'
coi:
- 2016: https://drive.google.com/file/d/0ByRf6PVViI-mYUFZNDRISTZodUU/view?usp=sharing&resourcekey=0-g13FJaUJpdR_t2rMLEyzEQ
2018: https://drive.google.com/file/d/1x0w-a1TWQdJDKPtQpGhmDZSdA4BhFSpI/view?usp=sharing
2020: https://drive.google.com/file/d/1VyirUdc6FBNOujl938bHf1JCL1jLNwXV/view?usp=sharing
2022: https://drive.google.com/file/d/1ww7WHQEg1395bPn20Arb7LJn9lIROdBl/view?usp=sharing
diff --git a/cms/pages/about/at-20.md b/cms/pages/about/at-20.md
index 1678adfe2a..6d0e339928 100644
--- a/cms/pages/about/at-20.md
+++ b/cms/pages/about/at-20.md
@@ -31,15 +31,15 @@ There is also an opportunity for you to [support DOAJ during its 20th year](/at-
- Event Time: 13:00 UTC
- Duration: 90 mins
- {% include "includes/svg/at-20/theme_global.svg" %}
- - **[Registration is open](https://us02web.zoom.us/webinar/register/WN_fu42oi59S7GZ366rjyAUGg#/registration)**
+ - **[Recording is available](https://www.youtube.com/watch?v=TRjtc-7tg8w)**
- Name: _DOAJ at 20: Global_
- Date: _28th September 2023_
- - Event Time: 13:00 UTC ([Check the event time](https://www.timeanddate.com/worldclock/fixedtime.html?iso=20230928T13&ah=1&am=30) where you are.)
+ - Event Time: 13:00 UTC
- Duration: 2 hours
- {% include "includes/svg/at-20/theme_trusted.svg" %}
- Name: _DOAJ at 20: Trusted_
- Date: _7th December 2023_
- - Event Time: to be confirmed
+ - Event Time: 14:00 UTC ([Check the event time](https://www.timeanddate.com/worldclock/fixedtime.html?msg=DOAJ+at+20%3A+Trusted&iso=20231207T14&p1=1440&ah=1&am=30) where you are.)
- Duration: 90 mins
## Open
diff --git a/cms/pages/apply/seal.md b/cms/pages/apply/seal.md
index b9c4968cca..7fc7f4d414 100644
--- a/cms/pages/apply/seal.md
+++ b/cms/pages/apply/seal.md
@@ -12,7 +12,7 @@ The DOAJ Seal is awarded to journals that demonstrate best practice in open acce
**Journals do not need to meet the Seal criteria to be accepted into DOAJ.**
-There are seven criteria which a journal must meet to be eligible for the DOAJ Seal. These relate to best practice in long term preservation, use of persistent identifiers, discoverability, reuse policies and authors' rights.
+There are seven criteria which a journal must meet to be eligible for the DOAJ Seal. These relate to best practices in long-term preservation, use of persistent identifiers, discoverability, reuse policies and authors' rights.
---
@@ -21,29 +21,29 @@ There are seven criteria which a journal must meet to be eligible for the DOAJ S
All seven criteria must be met for a journal to be awarded the Seal. Failure to maintain the best practice and standards described in these criteria may lead to removal of the Seal.
{:.tabular-list .tabular-list--ordered}
-1. Digital preservation
+1. Digital preservation (Archiving policy)
- The journal content must be continuously deposited in one of these archives:
- any archiving agency included in [Keepers Registry](https://keepers.issn.org/keepers)
- Internet Archive
- PubMed Central
-2. Persistent article identifiers
+2. Self-archiving (Repository policy)
+ - Authors must be permitted to deposit all versions of their paper in an institutional or subject repository.
+ - Preprint
+ - Author's Accepted Manuscript
+ - Published article (Version of Record)
+ - An embargo may not be applied.
+3. Persistent article identifiers (Unique identifiers)
- Articles must use persistent article identifiers. DOI, ARK or Handle are the most commonly used.
- All persistent links must resolve correctly.
-3. Metadata supply to DOAJ
+4. Metadata supply to DOAJ
- Article metadata must be uploaded to DOAJ regularly.
-4. License type
+5. License type
- The journal must permit the use of a Creative Commons license that allows the creation of derivative products.
- CC BY
- CC BY-SA
- CC BY-NC
- CC BY-NC-SA
-5. License information in articles
+6. License information in articles
- Creative Commons licensing information must be displayed in all full-text article formats.
-6. Copyright and publishing rights
+7. Copyright and publishing rights
- Authors must retain unrestricted copyright and all publishing rights when publishing under any license permitted by the journal.
-7. Self-archiving policy
- - Authors must be permitted to deposit all versions of their paper in an institutional or subject repository.
- - Preprint
- - Author's Accepted Manuscript
- - Published article (Version of Record)
- - An embargo may not be applied.
diff --git a/cms/pages/apply/transparency.md b/cms/pages/apply/transparency.md
index 0bda2aae3d..913d061882 100644
--- a/cms/pages/apply/transparency.md
+++ b/cms/pages/apply/transparency.md
@@ -23,13 +23,14 @@ These principles also acknowledge that publishers and editors are responsible fo
### JOURNAL CONTENT
-#### 1. Name of journal
+**1. Name of journal**
+
The journal's name should:
- Be unique and not be one that is easily confused with another journal.
- Not mislead potential authors and readers about the journal's origin, scope, or association with other journals and organisations.
-#### 2. Website
+**2. Website**
+
- Websites should be properly supported and maintained, with particular attention given to security aspects that help protect users from viruses and malware.
- As a minimum, websites should use https and not http, and all traffic should be redirected through https. Those responsible for the website should apply web standards and best ethical practices to the website's content, presentation, and application.
@@ -45,20 +46,22 @@ In addition to the requirements outlined above, the following items should be cl
- Authorship criteria.
- ISSNs (separate for print and electronic versions).
-#### 3. Publishing schedule
+**3. Publishing schedule**
+
A journal's publishing frequency should be clearly described, and the journal must keep to its publishing schedule unless there are exceptional circumstances.
-#### 4. Archiving
+**4. Archiving**
+
A journal's plan for electronic backup and long term digital preservation of the journal content, in the event that the journal and/or publisher stops operating, should be clearly indicated. Examples include PMC and those listed in [the Keepers Registry](https://keepers.issn.org/).
-#### 5. Copyright
+**5. Copyright**
+
- The copyright terms for published content should be clearly stated on the website and in the content.
- The copyright terms should be separate and distinct from the copyright of the website.
- The copyright holder should be named on the full text of all published articles (HTML and PDF).
- If the copyright terms are described in a separate form, this should be easy to find on the website and available to all.
-#### 6. Licencing
+**6. Licencing**
+
- Licencing information should be clearly described on the website.
- Licencing terms should be indicated on the full text of all published articles (HTML and PDF).
@@ -69,7 +72,8 @@ If Creative Commons licences are used, then the terms of that licence should als
### JOURNAL PRACTICES
-#### 7. Publication ethics and related editorial policies
+**7. Publication ethics and related editorial policies**
+
A journal should have policies on publication ethics (for example, [COPE's Core Practice guidance](https://publicationethics.org/core-practices)). These should be visible on its website, and should refer to:
- Journal's policies on [authorship and contributorship](https://publicationethics.org/authorship).
@@ -84,7 +88,8 @@ A journal should have policies on publication ethics (for example, [COPE's Core
Editors and publishers are responsible for ensuring the integrity of the scholarly literature in their journals and should ensure they outline their policies and procedures for handling such issues when they arise. These issues include plagiarism, citation manipulation, and data falsification/fabrication, among others. Neither the journal’s policies nor the statements of its editors should encourage such misconduct, or knowingly allow such misconduct to take place. In the event that a journal's editors or publisher are made aware of any allegation of research misconduct relating to a submitted or published article in their journal, the editor or publisher should follow [COPE's guidance](https://publicationethics.org/guidance) (or equivalent) in dealing with allegations.
-#### 8. Peer review
+**8. Peer review**
+
Peer review is defined as obtaining advice on manuscripts from reviewers/experts in the manuscript’s subject area. Those individuals should not be part of the journal's editorial team. However, the specific elements of peer review may differ by journal and discipline, so the following should be clearly stated on the website:
- Whether or not the content is peer reviewed.
@@ -105,30 +110,31 @@ Journals should not guarantee acceptance of initial manuscript submissions. Stat
The date of publication should be published with all published research. Dates of submission and acceptance are preferred as well.
-#### 9. Access
+**9. Access**
+
If any of the online content is not freely accessible to everyone, the method of gaining access (for example, registration, subscription, or pay-per-view fees) should be clearly described. If offline versions (for example, print) are available, this should be clearly described along with any associated charges.
### ORGANISATION
-#### 10. Ownership and management
+**10. Ownership and management**
+
- Information about the ownership and management of a journal should be clearly indicated on the journal's website.
- Organisational names should not be used in a way that could mislead potential authors and editors about the nature of the journal's owner.
- If a journal is affiliated with a society, institution, or sponsor, links to their website(s) should be provided where available.
-#### 11. Advisory body
+**11. Advisory body**
+
Journals should have editorial boards or other advisory bodies whose members are recognised experts in the subject areas stated in the journal's aims and scope.
- The full names and affiliations of the members should be provided on the journal's website.
- The list should be up to date, and members must agree to serve.
- To avoid being associated with predatory or deceptive journals, journals should periodically review their board to ensure it is still relevant and appropriate.
-#### 12. Editorial team/contact information
+**12. Editorial team/contact information**
+
Journals should provide the full names and affiliations of their editors as well as contact information for the editorial office, including a full mailing address, on the journal’s website.
### BUSINESS PRACTICES
-#### 13. Author fees
+**13. Author fees**
+
- If author fees are charged (such as article processing charges, page charges, editorial processing charges, language editing fees, colour charges, submission fees, membership fees, or other supplementary charges), then the fees should be clearly stated on the website.
- If there are no such fees, this should be clearly stated.
@@ -141,14 +147,16 @@ Journals should provide the full names and affiliations of their editors as well
- When and how to apply for a waiver.
- Author fees or waiver status should not influence editorial decision making, and this should be clearly stated.
-#### 14. Other revenue
+**14. Other revenue**
+
Business models or revenue sources should be clearly stated on the journal's website.
Examples include author fees (see section 13), subscriptions, sponsorships and subsidies, advertising (see section 15), reprints, supplements, or special issues.
Business models or revenue sources (for example, reprint income, supplements, special issues, sponsorships) should not influence editorial decision making.
-#### 15. Advertising
+**15. Advertising**
+
Journals should state whether they accept advertising. If they do, they should state their advertising policy, including:
- Which types of advertisements will be considered.
@@ -157,7 +165,8 @@ Journals should state whether they accept advertising. If they do, they should s
Advertisements should not be related in any way to editorial decision making and should be kept separate from the published content.
-#### 16. Direct marketing
+**16. Direct marketing**
+
Any direct marketing activities, including solicitation of manuscripts, that are conducted on behalf of the journal should be appropriate, well targeted, and unobtrusive. Information provided about the publisher or journal should be truthful and not misleading for readers or authors.
## Version history
diff --git a/cms/pages/legal/terms.md b/cms/pages/legal/terms.md
index ad872453eb..1b27b66e99 100644
--- a/cms/pages/legal/terms.md
+++ b/cms/pages/legal/terms.md
@@ -32,7 +32,7 @@ DOAJ uses a variety of licenses for the different parts of its website and the c
+ In our [OAI-PMH feed](/docs/oai-pmh)
+ In the [full data dump of all article metadata](/docs/public-data-dump/).
-4. The *open source software* that DOAJ is built with is licensed under [an Apache license Version 2](https://github.com/DOAJ/doaj/blob/a6fc2bee499b5a8a1f24fb098acfb8e10bd72503/portality/static/vendor/select2-3.5.4/LICENSE).
+4. The *open source software* that DOAJ is built with is licensed under [an Apache license Version 2](https://github.com/DOAJ/doaj/blob/develop/LICENSE).
---
diff --git a/cms/pages/support/index.md b/cms/pages/support/index.md
index 8c9ad71dc4..cc39fbfd44 100644
--- a/cms/pages/support/index.md
+++ b/cms/pages/support/index.md
@@ -2,7 +2,7 @@
layout: sidenav
sidenav_include: /includes/_sidenav_donation.html
include: /includes/contribution_rates.html
-title: Support DOAJ
+title: Institutional and library supporter model
section: Support
sticky_sidenav: true
featuremap:
@@ -11,10 +11,37 @@ featuremap:
---
-Support of DOAJ by academic organisations is vital and we are proud to acknowledge that over 80% of our support comes to us this way. We are very grateful to all our supporting academic organisations from around the world.
+Support of DOAJ by academic organisations is vital, and we are proud to acknowledge that over 80% of our support comes to us this way. We are very grateful to all our supporting academic organisations worldwide.
-The suggested contributions for academic organisations are below. Use the table to find the most appropriate option for your organisation. [Send an email](mailto:joanna@doaj.org) to Joanna Ball, Managing Director, with the details of the support level you have chosen. Alternatively, you can use our invoice-free one-time donation button to send us an amount of your choosing.
+### 2024 pricing
-(Publishers interested in supporting us should read the [publisher supporters](/support/publisher-supporters/) page.)
+For 2024, we have revised and simplified our supporter model to align with the levels recommended by SCOSS. This new model enables us to invest in the organisation's future and to continue to provide a high-quality service to our community.
+
+| | EUR (€) | USD ($) | GBP (£) |
+|---------------------|----------|---------|---------|
+| Large organisations | 4,000 | 4,400 | 3,440 |
+| Small organisations | 2,000 | 2,200 | 1,720 |
+| Organisations from [low- and middle-income countries](https://datatopics.worldbank.org/world-development-indicators/the-world-by-income-and-region.html) | 500 | 550 | 430 |
+
+A 30% discount will be applied to institutions supporting via a billing consortium. Please contact [supporters@doaj.org](mailto:supporters@doaj.org) for further information.
+
+We always have a wishlist of development projects for which we require additional funding. Please contact us if you would like to support us over and above our standard rates.
+
+### Why you should support us
+
+- We are community-led and -governed. Your support enables our commitment to being 100% independent.
+- Supporting open infrastructure is a strategic choice for libraries and institutions, demonstrating your commitment to open research.
+- We are seeing a steady increase in demand: the number of applications we receive each year has increased by 60% since 2018, and our investigations into questionable publishing practices are becoming more complex.
+- Help us deliver our role in driving standards and best practice in open access publishing, for example through the [Principles of transparency and best practice in scholarly publishing](/apply/transparency/) and the [OA Journals Toolkit](https://www.oajournals-toolkit.org/).
+- You rely extensively on our metadata as a source of trusted journals, integrating it into discovery systems and open access services.
+
+By supporting us, your organisation will join [a growing family of like-minded institutions](/support/supporters/) committed to ensuring quality content is available online for everyone. Supporting DOAJ is a statement of belief in equitable open knowledge and science.
+
+### Benefits for institutional and library supporters
+
+- We will add your institution’s name to [our Supporters page](/support/supporters/)
+- You can include details of your DOAJ support in marketing activities
+- You can use our logo on your institution’s websites and in other communications
+- You can integrate the DOAJ metadata into your services via our OAI-PMH service, our API or the public data dump (see the sketch below)
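+
+As a rough illustration (an untested sketch; please check the exact endpoint and response shape against [our API documentation](https://doaj.org/api/docs)), pulling journal metadata from the search API might look like this:
+
+```python
+import requests
+
+# Query the DOAJ journal search API (assumed endpoint; see https://doaj.org/api/docs)
+resp = requests.get("https://doaj.org/api/search/journals/neurology", timeout=30)
+resp.raise_for_status()
+
+# Each result is assumed to carry the journal metadata under "bibjson"
+for hit in resp.json().get("results", []):
+    bibjson = hit.get("bibjson", {})
+    print(bibjson.get("title"), bibjson.get("publisher", {}).get("name"))
+```
+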
---
diff --git a/cms/pages/support/publisher-supporters.md b/cms/pages/support/publisher-supporters.md
index 1d82e26b2b..f010872aba 100644
--- a/cms/pages/support/publisher-supporters.md
+++ b/cms/pages/support/publisher-supporters.md
@@ -1,7 +1,7 @@
---
layout: sidenav
include: /data/publisher-supporters.html
-title: Publisher supporters
+title: Publisher supporter model
section: Support
sticky_sidenav: true
toc: true
@@ -10,15 +10,62 @@ featuremap:
- ~~->PublisherSupportersData:Template~~
---
-The publishers on this page have chosen to show their commitment to quality, peer-reviewed open access by supporting DOAJ. We thank them! Without them, our work would not be possible.
+DOAJ relies on the support of publishers and [libraries](/support/) to ensure that its metadata and services remain free for all. The publishers on this page have chosen to show their commitment to quality, peer-reviewed open access by supporting DOAJ. We thank them! Without them, our work would not be possible.
-**To become a publisher supporter**, send an email to [our Help desk](mailto:helpdesk@doaj.org) and we will provide with details on how to support us. Your organisation will be listed on this page.
+## 2024 pricing
-'Premier' and 'Sustaining' publishers have committed to supporting DOAJ for a three-year period. 'Basic' publishers support us for one year.
+We are introducing a revised and simplified model for publishers to support DOAJ for 2024, and are publishing it openly in line with [our commitment to the Principles of Open Scholarly Infrastructure](https://blog.doaj.org/2022/10/06/doaj-commits-to-the-principles-of-open-scholarly-infrastructure-posi/). We are also relaunching the set of benefits for publishers choosing to support us.
-
{% include '/data/sponsors.html' %}
+We only accept support through our publisher supporter model from publishers with journals already indexed in DOAJ. Other routes to support DOAJ are as [an institution](/support/) or as [an individual via PayPal](https://www.paypal.com/donate/?campaign_id=4VXR4TJ69MDJJ). Non-commercial/institutional rates are only available to community-led, smaller publishers with limited funding. Please contact [supporters@doaj.org](mailto:supporters@doaj.org) if you are unsure which category applies.
+
+Please contact [supporters@doaj.org](mailto:supporters@doaj.org) if you want to contribute to DOAJ’s operating costs as a publisher supporter.
+
+### Commercial publishers
+
+| Band | Number of journals in DOAJ | GBP (£)* |
+|------|----------------------------|-----------|
+| A | 600+ | 25,000 |
+| B | 400-599 | 20,000 |
+| C | 150-399 | 17,000 |
+| D | 100-149 | 14,000 |
+| E | 50-99 | 8,000 |
+| F | 30-49 | 6,000 |
+| G | 10-29 | 5,000 |
+| H | 1-9 | 3,500 |
+
+### Non-commercial / institutional publishers
+
+| Band | Number of journals in DOAJ | GBP (£)* |
+|------|----------------------------|-----------|
+| C | 150-399 | 3,500 |
+| D | 100-149 | 3,000 |
+| E | 50-99 | 2,500 |
+| F | 30-49 | 2,000 |
+| G | 10-29 | 1,500 |
+| H | 1-9 | 1,000 |
+
+*A 50% discount is available for supporters in Low- and Middle-Income Countries according to the World Bank classification.
+
+## 2024 publisher benefits
-## Benefits for contributing publishers and aggregators
+1. Your logo on the DOAJ website
+2. A post on all our social media platforms (Twitter, Facebook, LinkedIn, Mastodon, Instagram) acknowledging your organisation as a Supporter
+3. A blog post at the start of the year introducing our new supporters
+4. Our DOAJ Supporter logo which you can use for your website
+5. Access to our Public Data Dump
+6. For supporters from Bands A-E, or those contributing over the suggested amounts, a personal DOAJ contact to whom all enquiries regarding your applications and updates can be directed
+
+## Sponsorship opportunities
+
+We are particularly grateful to those publishers who can contribute over and above these amounts. In these cases, we can offer sponsorship opportunities that enhance our services and support open access globally, for example:
+
+- Specific technical developments
+- Ambassador programme
+- Webinar programmes and events
+
+Please get in touch to discuss.
+
+## 2023 benefits for publisher supporters
([A downloadable version](https://docs.google.com/document/d/1xTVxUvqLkh2-r53cYlWdSIHsPGSnhcE7gi7bRFCaJik/edit?usp=sharing) of these benefits is available.)
@@ -33,4 +80,6 @@ The publishers on this page have chosen to show their commitment to quality, pee
-| | | A CSV file, generated annually, for recording changes in and which DOAJ updates your live records with. |
+| | | A CSV file, generated annually, in which you record changes and with which DOAJ updates your live records. |
| | | Exposure across all our social media channels: Twitter, Instagram, LinkedIn, Facebook, WeChat. (Stats available.) |
+
{% include '/data/sponsors.html' %}
+
## Other publisher supporters
diff --git a/cms/pages/support/supporters.md b/cms/pages/support/supporters.md
index c7d08ddda8..6cf98e6b0f 100644
--- a/cms/pages/support/supporters.md
+++ b/cms/pages/support/supporters.md
@@ -7,9 +7,9 @@ featuremap: ~~Supporters:Fragment~~
---
-We are proud that over 80% of DOAJ's funding comes from academic organisations (libraries, library consortia, universities, research centres). Without this vital support, we wouldn't be able to continue the high levels of service that the research community expects of us. We are grateful for the trust shown in us by our supporters.
+We are proud that over 80% of our funding comes from academic organisations (libraries, library consortia, universities, research centres). Without this vital support, we couldn't deliver the services the research community expects of us. We are grateful for the trust shown in us by our supporters.
- Check [our support page](/support/) for more information on supporter levels and categories.
+Check [our Institutions and libraries support page](/support/) for pricing and benefits.
---
diff --git a/cms/sass/components/_accordion.scss b/cms/sass/components/_accordion.scss
new file mode 100644
index 0000000000..e066ee02f6
--- /dev/null
+++ b/cms/sass/components/_accordion.scss
@@ -0,0 +1,3 @@
+.accordion:focus-within {
+ border: $grapefruit solid;
+}
\ No newline at end of file
diff --git a/cms/sass/components/_buttons.scss b/cms/sass/components/_buttons.scss
index 1e71d3aceb..061c75c454 100644
--- a/cms/sass/components/_buttons.scss
+++ b/cms/sass/components/_buttons.scss
@@ -117,3 +117,10 @@ button[type="submit"].button--secondary {
color: currentColor;
}
}
+
+button.aria-button {
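+  // reset every property to the parent's values so the button has no native chrome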
+ all: inherit;
+ -webkit-appearance: none;
+ -moz-appearance: none;
+ appearance: none;
+}
diff --git a/cms/sass/components/_filters.scss b/cms/sass/components/_filters.scss
index e82883841b..fddb1e6e07 100644
--- a/cms/sass/components/_filters.scss
+++ b/cms/sass/components/_filters.scss
@@ -4,6 +4,24 @@
margin-bottom: $spacing-04;
border: 0;
@include typescale-06;
+
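+  // visually hide the native input (opacity: 0) but keep it in the tab order,
+  // styling the adjacent label when the input receives keyboard focus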
+ input[type="checkbox"],
+ input[type="radio"] {
+ display: unset;
+ opacity: 0;
+ width: 0.8em;
+ height: 0.8em;
+
+ &:focus + label {
+ outline: dashed 2px lightgrey;
+ outline-offset: 1px;
+ }
+
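+    // drop the outline for pointer focus; :focus-visible keeps it for keyboard users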
+ &:focus:not(:focus-visible){
+ outline: none;
+ }
+ }
+
}
.filters__heading {
@@ -50,6 +68,8 @@
max-height: $spacing-07;
height: auto;
overflow-y: auto;
+ padding-top: $spacing-01;
+
@include unstyled-list;
li {
diff --git a/cms/sass/components/_form.scss b/cms/sass/components/_form.scss
index b203ff5988..fd97423f9f 100644
--- a/cms/sass/components/_form.scss
+++ b/cms/sass/components/_form.scss
@@ -82,7 +82,7 @@
border-left: 1px solid $sanguine;
}
-.form__long-help {
+.form__long-help, .form__click-to-copy {
cursor: pointer;
&:hover {
diff --git a/cms/sass/components/_skip-to-main-content.scss b/cms/sass/components/_skip-to-main-content.scss
new file mode 100644
index 0000000000..3542adb62c
--- /dev/null
+++ b/cms/sass/components/_skip-to-main-content.scss
@@ -0,0 +1,35 @@
+/* Back to main content button */
+
+.skip-to-main {
+ position: absolute;
+ z-index: 10000;
+ display: flex;
+ flex-direction: row;
+ align-items: center;
+ min-width: min-content;
+ padding: 5px;
+ top: 10px;
+ left: 10px;
+ background-color: $grapefruit;
+
+ svg {
+ display: block;
+ margin: 0 auto;
+ stroke: $warm-black;
+ margin-right: 10px;
+ }
+ &:hover, &:focus {
+ svg {
+ margin-right: 10px;
+ }
+ }
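+ // reveal the "Skip to main content" label when the control is hovered or focused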
+ &:hover:after, &:focus:after {
+ content: " Skip to main content";
+ color: $warm-black;
+ vertical-align: bottom;
+ -webkit-font-feature-settings: 'liga' 1;
+ -moz-font-feature-settings: 'liga' 1;
+ font-feature-settings: 'liga' 1;
+ transition: all 0.5s ease;
+ }
+}
\ No newline at end of file
diff --git a/cms/sass/components/_tag.scss b/cms/sass/components/_tag.scss
index 1f24ebce92..836e55c7ef 100644
--- a/cms/sass/components/_tag.scss
+++ b/cms/sass/components/_tag.scss
@@ -90,3 +90,8 @@
color: $white;
}
}
+
+.tag--confirmation {
+ background: $dark-green;
+ color: $white;
+}
diff --git a/cms/sass/main.scss b/cms/sass/main.scss
index cdd22133b8..661f2a4ae6 100644
--- a/cms/sass/main.scss
+++ b/cms/sass/main.scss
@@ -28,6 +28,7 @@
"layout/sidenav",
"components/alert",
+ "components/accordion",
"components/back-to-top",
"components/buttons",
"components/card",
@@ -52,6 +53,7 @@
"components/review-table",
"components/select2",
"components/search-results",
+ "components/skip-to-main-content",
"components/stat",
"components/stretch-list",
"components/tabs",
diff --git a/deploy/doaj_gunicorn_config.py b/deploy/doaj_gunicorn_config.py
index f9425de5e5..a08dd6ef62 100644
--- a/deploy/doaj_gunicorn_config.py
+++ b/deploy/doaj_gunicorn_config.py
@@ -1,7 +1,7 @@
import multiprocessing
bind = "0.0.0.0:5050"
-workers = multiprocessing.cpu_count() * 8 + 1
+workers = multiprocessing.cpu_count() * 6 + 1
proc_name = 'doaj'
max_requests = 1000
@@ -13,4 +13,4 @@
max_requests_jitter = 100
timeout = 40
-graceful_timeout = 40
\ No newline at end of file
+graceful_timeout = 40
diff --git a/deploy/lambda/alert_backups_missing.py b/deploy/lambda/alert_backups_missing.py
index 38a9edbc2e..566a361b9a 100644
--- a/deploy/lambda/alert_backups_missing.py
+++ b/deploy/lambda/alert_backups_missing.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-""" Steven Eardley 2020-02-07 for DOAJ - uploaded manually (todo: we should upload this in the release script) """
+""" Steven Eardley 2023-09-15 for DOAJ - uploaded manually (todo: we should upload this in the release script) """
# ~~BackupsMissing:Monitoring->Lambda:Technology~~
@@ -8,23 +8,25 @@
import json
from datetime import datetime, timezone, timedelta
-from portality.lib.dates import FMT_DATETIME_STD
-
s3 = boto3.client('s3')
# Check the doaj elasticsearch snapshot bucket has been updated today (should happen daily at 0600 via background job)
-buckets = ['doaj-index-backups']
+buckets = ['doaj-index-ipt-backups']
+
# Check the doaj-nginx logs bucket has been updated today (should happen daily at 0630 via cron logrotate)
-buckets += ['doaj-nginx-logs']
+# buckets += ['doaj-nginx-logs']
def lambda_handler(event, context):
""" The main function executed by Lambda"""
+ start = datetime.utcnow()
summary = {'success': [], 'fail': []}
for b in buckets:
+ print('Checking bucket {0} was updated today'.format(b))
+
# First check the bucket actually exists
try:
s3.head_bucket(Bucket=b)
@@ -32,11 +34,13 @@ def lambda_handler(event, context):
error_code = int(e.response['Error']['Code'])
if error_code == 404:
send_alert_email(b, last_mod=None)
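+ # re-raise so the Lambda invocation itself is reported as failed after alerting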
+ raise
# Then check the expected entry exists in the bucket's objects.
files = list_bucket_keys(bucket_name=b)
old_to_new = sorted(files, key=lambda f: f['LastModified'])
newest = old_to_new[-1]
+ print('Latest backup is', newest)
# If the newest file is older than 1 day old, our backups are not up to date.
if datetime.now(timezone.utc) - newest['LastModified'] > timedelta(days=1):
@@ -47,6 +51,8 @@ def lambda_handler(event, context):
summary['success'].append(b)
print(summary) # For the CloudWatch logs
+ print('Completed in', str(datetime.utcnow() - start))
+
return str(summary)
@@ -86,8 +92,8 @@ def send_alert_email(bucket, last_mod):
msg = 'AWS backup error: bucket {b} is missing.'.format(b=bucket)
else:
msg = 'AWS backup error: bucket {b} has not been updated today - it was last modified on {t}.' \
- '\nYou may wish to check the corresponding logs.'.format(b=bucket,
- t=last_mod.strftime(FMT_DATETIME_STD))
+ '\nYou may wish to check the corresponding logs.'.format(b=bucket, t=last_mod.strftime(
+ '%Y-%m-%dT%H:%M:%SZ'))
r = botocore.vendored.requests.post('https://api.mailgun.net/v3/doaj.org/messages',
auth=('api', credentials.get('ERROR_MAIL_API_KEY', '')),
diff --git a/deploy/nginx/doaj b/deploy/nginx/doaj
index b52db3aea0..4e6c3b0576 100644
--- a/deploy/nginx/doaj
+++ b/deploy/nginx/doaj
@@ -36,17 +36,27 @@ map $http_user_agent $block_ua {
~*curl 1;
}
+# the public server (deprecated, use failover)
upstream doaj_apps {
- server 10.131.191.139:5050;
+ server 10.131.191.139:5050; #doaj-public-app-1
}
+
+# Background server runs async tasks
upstream doaj_bg_apps {
- #server 10.131.56.133:5050; #old bg machine
- server 10.131.12.33:5050;
+ server 10.131.12.33:5050; #doaj-background-app-1
+}
+
+# Editor and admin site components
+upstream doaj_ed_failover {
+ server 10.131.56.133:5050; #doaj-editor-app-1
+ server 10.131.12.33:5050 backup; #doaj-background-app-1
}
+
+# For public site components, try all servers
upstream doaj_apps_failover {
- server 10.131.191.139:5050;
- #server 10.131.56.133:5050 backup; #old bg machine
- server 10.131.12.33:5050 backup;
+ server 10.131.191.139:5050; #doaj-public-app-1
+ server 10.131.12.33:5050 backup; #doaj-background-app-1
+ server 10.131.56.133:5050 backup; #doaj-editor-app-1
}
upstream doaj_index {
server 10.131.191.132:9200;
@@ -121,6 +131,7 @@ server {
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
}
+
location /search {
if ($block_ua) {return 403;}
limit_req zone=general burst=10 nodelay;
@@ -144,9 +155,7 @@ server {
proxy_buffering off;
}
- # for now we are going to send all login functions to the bg machine
- # technically ONLY the routes that require file upload need to go to the bg machine
- # but we think it is handy to separate them out, and later we could send them to other machines
+ # technically only the routes that require file upload need to go to the bg machine, but separate for consistency
location /account {
limit_req zone=general burst=10 nodelay;
proxy_pass http://doaj_bg_apps;
@@ -157,6 +166,19 @@ server {
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
}
+
+ # prefer the editor machine for application form work (but application_quick_reject goes to background async)
+ location ~* /admin/application/ {
+ limit_req zone=general burst=10 nodelay;
+ proxy_pass http://doaj_ed_failover;
+ proxy_redirect off;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_buffering off;
+ }
+
location /admin { # there are admin bulk actions that MUST go to bg machine
limit_req zone=general burst=10 nodelay;
proxy_pass http://doaj_bg_apps;
@@ -167,9 +189,10 @@ server {
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
}
+
location /editor {
limit_req zone=general burst=10 nodelay;
- proxy_pass http://doaj_bg_apps;
+ proxy_pass http://doaj_ed_failover;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
@@ -177,9 +200,10 @@ server {
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
}
+
location /journal/readonly {
limit_req zone=general burst=10 nodelay;
- proxy_pass http://doaj_bg_apps;
+ proxy_pass http://doaj_ed_failover;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
@@ -187,7 +211,8 @@ server {
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
}
- location /publisher { # only /publisher/uploadfile MUST go to bg, and /publisher/uploadFile
+
+ location /publisher { # only /publisher/uploadfile MUST go to background
limit_req zone=general burst=10 nodelay;
proxy_pass http://doaj_bg_apps;
proxy_redirect off;
@@ -197,7 +222,8 @@ server {
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
}
- location /service {
+
+ location /service { # performs locks etc - handle on the background server
limit_req zone=general burst=10 nodelay;
proxy_pass http://doaj_bg_apps;
proxy_redirect off;
@@ -221,6 +247,7 @@ server {
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
}
+
location /csv {
limit_req zone=general burst=10 nodelay;
proxy_pass http://doaj_bg_apps;
@@ -235,6 +262,7 @@ server {
location =/robots.txt {
alias /home/cloo/doaj/src/doaj/deploy/robots-production.txt;
}
+
location /static/ {
alias /home/cloo/doaj/src/doaj/portality/static/;
autoindex off;
diff --git a/doajtest/matrices/article_create_article/issn_validation_against_journal.matrix.csv b/doajtest/matrices/article_create_article/issn_validation_against_journal.matrix.csv
new file mode 100644
index 0000000000..0d2f704aba
--- /dev/null
+++ b/doajtest/matrices/article_create_article/issn_validation_against_journal.matrix.csv
@@ -0,0 +1,17 @@
+test_id,eissn,pissn,validated
+1,eissn_in_doaj,pissn_in_doaj,yes
+2,eissn_in_doaj,eissn_not_in_doaj,
+3,eissn_in_doaj,pissn_not_in_doaj,
+4,eissn_in_doaj,!eissn_in_doaj,
+5,pissn_in_doaj,eissn_in_doaj,
+6,pissn_in_doaj,eissn_not_in_doaj,
+7,pissn_in_doaj,pissn_not_in_doaj,
+8,pissn_in_doaj,!pissn_in_doaj,
+9,eissn_not_in_doaj,eissn_in_doaj,
+10,eissn_not_in_doaj,pissn_in_doaj,
+11,eissn_not_in_doaj,pissn_not_in_doaj,
+12,eissn_not_in_doaj,!eissn_not_in_doaj,
+13,pissn_not_in_doaj,eissn_in_doaj,
+14,pissn_not_in_doaj,pissn_in_doaj,
+15,pissn_not_in_doaj,eissn_not_in_doaj,
+16,pissn_not_in_doaj,!pissn_not_in_doaj,
diff --git a/doajtest/matrices/article_create_article/issn_validation_against_journal.settings.csv b/doajtest/matrices/article_create_article/issn_validation_against_journal.settings.csv
new file mode 100644
index 0000000000..a8eab3f4ce
--- /dev/null
+++ b/doajtest/matrices/article_create_article/issn_validation_against_journal.settings.csv
@@ -0,0 +1,19 @@
+field,test_id,eissn,pissn,validated
+type,index,generated,generated,conditional
+default,,,,no
+,,,,
+values,,eissn_in_doaj,eissn_in_doaj,yes
+values,,pissn_in_doaj,pissn_in_doaj,no
+values,,eissn_not_in_doaj,eissn_not_in_doaj,
+values,,pissn_not_in_doaj,pissn_not_in_doaj,
+,,,,
+,,,,
+conditional validated,,eissn_in_doaj,pissn_in_doaj,yes
+constraint eissn,,eissn_in_doaj,!eissn_in_doaj,
+constraint eissn,,eissn_not_in_doaj,!eissn_not_in_doaj,
+constraint eissn,,pissn_not_in_doaj,!pissn_not_in_doaj,
+constraint eissn,,pissn_in_doaj,!pissn_in_doaj,
+constraint pissn,,eissn_in_doaj,!eissn_in_doaj,
+constraint pissn,,eissn_not_in_doaj,!eissn_not_in_doaj,
+constraint pissn,,pissn_not_in_doaj,!pissn_not_in_doaj,
+constraint pissn,,pissn_in_doaj,!pissn_in_doaj,
\ No newline at end of file
diff --git a/doajtest/matrices/article_create_article/issn_validation_against_journal.settings.json b/doajtest/matrices/article_create_article/issn_validation_against_journal.settings.json
new file mode 100644
index 0000000000..11d1012a96
--- /dev/null
+++ b/doajtest/matrices/article_create_article/issn_validation_against_journal.settings.json
@@ -0,0 +1,119 @@
+{
+ "parameters": [
+ {
+ "name": "test_id",
+ "type": "index"
+ },
+ {
+ "name": "eissn",
+ "type": "generated",
+ "values": {
+ "eissn_in_doaj": {
+ "constraints": {
+ "pissn": {
+ "nor": [
+ "eissn_in_doaj"
+ ]
+ }
+ }
+ },
+ "pissn_in_doaj": {
+ "constraints": {
+ "pissn": {
+ "nor": [
+ "pissn_in_doaj"
+ ]
+ }
+ }
+ },
+ "eissn_not_in_doaj": {
+ "constraints": {
+ "pissn": {
+ "nor": [
+ "eissn_not_in_doaj"
+ ]
+ }
+ }
+ },
+ "pissn_not_in_doaj": {
+ "constraints": {
+ "pissn": {
+ "nor": [
+ "pissn_not_in_doaj"
+ ]
+ }
+ }
+ }
+ }
+ },
+ {
+ "name": "pissn",
+ "type": "generated",
+ "values": {
+ "eissn_in_doaj": {},
+ "pissn_in_doaj": {},
+ "eissn_not_in_doaj": {},
+ "pissn_not_in_doaj": {},
+ "!eissn_in_doaj": {
+ "constraints": {
+ "eissn": {
+ "or": [
+ "eissn_in_doaj"
+ ]
+ }
+ }
+ },
+ "!eissn_not_in_doaj": {
+ "constraints": {
+ "eissn": {
+ "or": [
+ "eissn_not_in_doaj"
+ ]
+ }
+ }
+ },
+ "!pissn_not_in_doaj": {
+ "constraints": {
+ "eissn": {
+ "or": [
+ "pissn_not_in_doaj"
+ ]
+ }
+ }
+ },
+ "!pissn_in_doaj": {
+ "constraints": {
+ "eissn": {
+ "or": [
+ "pissn_in_doaj"
+ ]
+ }
+ }
+ }
+ }
+ },
+ {
+ "name": "validated",
+ "type": "conditional",
+ "values": {
+ "yes": {
+ "conditions": [
+ {
+ "eissn": {
+ "or": [
+ "eissn_in_doaj"
+ ]
+ },
+ "pissn": {
+ "or": [
+ "pissn_in_doaj"
+ ]
+ }
+ }
+ ]
+ },
+ "no": {}
+ }
+ }
+ ]
+}
\ No newline at end of file
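
The matrix, settings.csv, and settings.json above are three views of one parameter space: the JSON constraints generate the 16 matrix rows, and the unit tests load the rows by name. A minimal sketch of how such a matrix reaches a test, assuming load_parameter_sets is importable from doajtest.helpers (as elsewhere in this suite) and yields (case_name, kwargs) pairs, mirroring the @parameterized.expand usage later in this diff:

    # Sketch only - not part of this changeset.
    from parameterized import parameterized

    from doajtest.helpers import DoajTestCase, load_parameter_sets
    from portality.lib.paths import rel2abs


    def issn_validation_cases():
        return load_parameter_sets(rel2abs(__file__, "..", "matrices", "article_create_article"),
                                   "issn_validation_against_journal", "test_id", {"test_id": []})


    class TestIssnValidationMatrix(DoajTestCase):
        @parameterized.expand(issn_validation_cases)
        def test_row(self, name, kwargs):
            # Each row supplies eissn, pissn and validated; only the row pairing
            # eissn_in_doaj with pissn_in_doaj is expected to validate ("yes").
            expect_valid = kwargs.get("validated") == "yes"
            ...  # build an article with the mapped ISSNs and check is_acceptable
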
diff --git a/doajtest/mocks/preservation.py b/doajtest/mocks/preservation.py
index 3c6d610d5b..7ced1a4b85 100644
--- a/doajtest/mocks/preservation.py
+++ b/doajtest/mocks/preservation.py
@@ -226,3 +226,227 @@ class PreservationMock:
"es_type": "article"
}
+ ARTICLE_DATA_JOURNAL2 = {
+ "index": {
+ "issn": [
+ "2673-611X"
+ ],
+ "date": "2016-10-01T00:00:00Z",
+ "date_toc_fv_month": "2016-10-01T00:00:00Z",
+ "subject": [
+ "Science: Natural history (General): General. Including nature conservation, geographical distribution"
+ ],
+ "schema_subject": [
+ "LCC:Neurology. Diseases of the nervous system"
+ ],
+ "classification": [
+ "Neurology. Diseases of the nervous system"
+ ],
+ "publisher": [
+ "Frontiers Media S.A."
+ ],
+ "license": [
+ "CC BY"
+ ],
+ "language": [
+ "English"
+ ],
+ "country": "United Kingdom",
+ "schema_code": [
+ "LCC:RC346-429"
+ ],
+ "classification_paths": [
+ "Medicine: Internal medicine: Neurosciences. Biological psychiatry. Neuropsychiatry: Neurology. Diseases of the nervous system"
+ ],
+ "unpunctitle": "Variation in TMEM106B in chronic traumatic encephalopathy",
+ "asciiunpunctitle": "Variation in TMEM106B in chronic traumatic encephalopathy",
+ "has_seal": "Yes",
+ "doi": "10.3389/fcosc.2022.1028295",
+ "fulltext": "https://frontiersin.org/articles/10.3389/fcosc.2022.1028295",
+ "schema_codes_tree": [
+ "LCC:RC31-1245",
+ "LCC:RC346-429",
+ "LCC:RC321-571",
+ "LCC:R"
+ ]
+ },
+ "last_updated": "2020-11-24T21:55:20Z",
+ "admin": {
+ "in_doaj": "true",
+ "seal": "true"
+ },
+ "created_date": "2018-11-04T12:37:46Z",
+ "id": "00005741594643f4996e2666a01e0310",
+ "bibjson": {
+ "title": "Variation in TMEM106B in chronic traumatic encephalopathy",
+ "year": "2018",
+ "month": "11",
+ "start_page": "1",
+ "end_page": "9",
+ "abstract": "Abstract The genetic basis of chronic traumatic encephalopathy (CTE) is poorly understood. Variation in transmembrane protein 106B (TMEM106B) has been associated with enhanced neuroinflammation during aging and with TDP-43-related neurodegenerative disease, and rs3173615, a missense coding SNP in TMEM106B, has been implicated as a functional variant in these processes. Neuroinflammation and TDP-43 pathology are prominent features in CTE. The purpose of this study was to determine whether genetic variation in TMEM106B is associated with CTE risk, pathological features, and ante-mortem dementia. Eighty-six deceased male athletes with a history of participation in American football, informant-reported Caucasian, and a positive postmortem diagnosis of CTE without comorbid neurodegenerative disease were genotyped for rs3173615. The minor allele frequency (MAF = 0.42) in participants with CTE did not differ from previously reported neurologically normal controls (MAF = 0.43). However, in a case-only analysis among CTE cases, the minor allele was associated with reduced phosphorylated tau (ptau) pathology in the dorsolateral frontal cortex (DLFC) (AT8 density, odds ratio [OR] of increasing one quartile = 0.42, 95% confidence interval [CI] 0.22–0.79, p = 0.008), reduced neuroinflammation in the DLFC (CD68 density, OR of increasing one quartile = 0.53, 95% CI 0.29–0.98, p = 0.043), and increased synaptic protein density (β = 0.306, 95% CI 0.065–0.546, p = 0.014). Among CTE cases, TMEM106B minor allele was also associated with reduced ante-mortem dementia (OR = 0.40, 95% CI 0.16–0.99, p = 0.048), but was not associated with TDP-43 pathology. All case-only models were adjusted for age at death and duration of football play. Taken together, variation in TMEM106B may have a protective effect on CTE-related outcomes.",
+ "journal": {
+ "volume": "6",
+ "number": "1",
+ "publisher": "BMC",
+ "title": "Acta Neuropathologica Communications",
+ "country": "GB",
+ "license": [
+ {
+ "title": "CC BY",
+ "type": "CC BY",
+ "url": "https://actaneurocomms.biomedcentral.com/submission-guidelines/copyright",
+ "open_access": "true"
+ }
+ ],
+ "language": [
+ "EN"
+ ],
+ "issns": [
+ "2673-611X"
+ ]
+ },
+ "identifier": [
+ {
+ "type": "doi",
+ "id": "10.1186/s40478-018-0619-9"
+ },
+ {
+ "type": "eissn",
+ "id": "2051-5960"
+ }
+ ],
+ "keywords": [
+ "Chronic traumatic encephalopathy",
+ "TMEM106B",
+ "Neuroinflammation",
+ "Football",
+ "Traumatic brain injury",
+ "Tau"
+ ],
+ "link": [
+ {
+ "type": "fulltext",
+ "url": "http://link.springer.com/article/10.1186/s40478-018-0619-9",
+ "content_type": "HTML"
+ }
+ ],
+ "subject": [
+ {
+ "scheme": "LCC",
+ "term": "Neurology. Diseases of the nervous system",
+ "code": "RC346-429"
+ }
+ ],
+ "author": [
+ {
+ "name": "Jonathan D. Cherry",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Jesse Mez",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "John F. Crary",
+ "affiliation": "Department of Pathology, Fishberg Department of Neuroscience, Friedman Brain Institute, Ronald M. Loeb Center for Alzheimer’s Disease, Icahn School of Medicine at Mount Sinai School"
+ },
+ {
+ "name": "Yorghos Tripodis",
+ "affiliation": "Department of Biostatistics, Boston University School of Public Health"
+ },
+ {
+ "name": "Victor E. Alvarez",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Ian Mahar",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Bertrand R. Huber",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Michael L. Alosco",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Raymond Nicks",
+ "affiliation": "Department of Veterans Affairs Medical Center"
+ },
+ {
+ "name": "Bobak Abdolmohammadi",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Patrick T. Kiernan",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Laney Evers",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Sarah Svirsky",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Katharine Babcock",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Hannah M. Gardner",
+ "affiliation": "VA Boston Healthcare System"
+ },
+ {
+ "name": "Gaoyuan Meng",
+ "affiliation": "VA Boston Healthcare System"
+ },
+ {
+ "name": "Christopher J. Nowinski",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Brett M. Martin",
+ "affiliation": "Department of Biostatistics, Boston University School of Public Health"
+ },
+ {
+ "name": "Brigid Dwyer",
+ "affiliation": "Department of Neurology, Boston University School of Medicine"
+ },
+ {
+ "name": "Neil W. Kowall",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Robert C. Cantu",
+ "affiliation": "Department of Anatomy and Neurobiology, Boston University School of Medicine"
+ },
+ {
+ "name": "Lee E. Goldstein",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Douglas I. Katz",
+ "affiliation": "Department of Neurology, Boston University School of Medicine"
+ },
+ {
+ "name": "Robert A. Stern",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Lindsay A. Farrer",
+ "affiliation": "Department of Neurology, Boston University School of Medicine"
+ },
+ {
+ "name": "Ann C. McKee",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ },
+ {
+ "name": "Thor D. Stein",
+ "affiliation": "Boston University Alzheimer’s Disease and CTE Center, Boston University School of Medicine"
+ }
+ ]
+ },
+ "es_type": "article"
+ }
diff --git a/doajtest/preservation_upload_test_package/invalid_article.zip b/doajtest/preservation_upload_test_package/invalid_article.zip
new file mode 100644
index 0000000000..dbbcf59bf1
Binary files /dev/null and b/doajtest/preservation_upload_test_package/invalid_article.zip differ
diff --git a/doajtest/preservation_upload_test_package/multi_journals.zip b/doajtest/preservation_upload_test_package/multi_journals.zip
new file mode 100644
index 0000000000..cd160b2c46
Binary files /dev/null and b/doajtest/preservation_upload_test_package/multi_journals.zip differ
diff --git a/doajtest/preservation_upload_test_package/valid_article.zip b/doajtest/preservation_upload_test_package/valid_article.zip
new file mode 100644
index 0000000000..883394c578
Binary files /dev/null and b/doajtest/preservation_upload_test_package/valid_article.zip differ
diff --git a/doajtest/testbook/articles_preservation/user_test_script.yml b/doajtest/testbook/articles_preservation/upload_preservation_files.yml
similarity index 53%
rename from doajtest/testbook/articles_preservation/user_test_script.yml
rename to doajtest/testbook/articles_preservation/upload_preservation_files.yml
index 051673c68d..effbe29bc5 100644
--- a/doajtest/testbook/articles_preservation/user_test_script.yml
+++ b/doajtest/testbook/articles_preservation/upload_preservation_files.yml
@@ -1,6 +1,6 @@
# ~~ ArticlesPreservation:FunctionalTest -> Preservation:Feature ~~
suite: Articles Preservation
-testset: User Test Script
+testset: Upload Preservation Files
tests:
- title: Publisher without preservation role
context:
@@ -22,7 +22,8 @@ tests:
steps:
- step: Ensure that the archive package has been created with correct structure
- step: Go to preservation area /publisher/preservation
- - step: Select 'Browse' and select the paaackage zip file
+ - step: Select 'Browse' and select the package zip file
+ resource: /preservation_upload_test_package/valid_article.zip
- step: click 'Upload'
results:
- 'A flash message appears at the top of the screen: File uploaded and waiting
@@ -40,7 +41,8 @@ tests:
steps:
- step: Create a incorrect or junk file that does not have articles
- step: Go to preservation area /publisher/preservation
- - step: Select 'Browse' and select the paaackage zip file
+ - step: Select 'Browse' and select the package zip file
+ resource: /preservation_upload_test_package/invalid_article.zip
- step: click 'Upload'
results:
- 'A flash message appears at the top of the screen: File uploaded and waiting
@@ -56,9 +58,11 @@ tests:
context:
role: publisher
steps:
- - step: Create a incorrect or junk file that does not have articles
+ - step: Make sure not to log in as an admin user, since admins have no restrictions. Log in as a publisher.
+ - step: Ensure that the archive package has been created with the correct structure, but that the article is not owned by the logged-in user.
- step: Go to preservation area /publisher/preservation
- - step: Select 'Browse' and select the paaackage zip file
+ - step: Select 'Browse' and select the package zip file
+ resource: /preservation_upload_test_package/valid_article.zip
- step: click 'Upload'
results:
- 'A flash message appears at the top of the screen: File uploaded and waiting
@@ -75,6 +79,42 @@ tests:
- if the status is 'partially success', show details link will be displayed. When
clicked on the link details of how many articles successful and how many not
successful will be displayed
+- title: Upload a correctly structured file with multiple journals and check it is processed correctly
+ context:
+ role: publisher
+ setup:
+ - Have a publisher account which owns 3 journals that are in DOAJ and which have articles
+ uploaded for them
+ - Select 1 article from each journal and note its DOI
+ - Download the multi_journals.zip test package (see the link in the second step below)
+ - Unzip the multi_journals.zip test package
+ - In the identifiers.csv file in the root of the test package, put each of the DOIs from your articles
+ next to one of the article directory names listed (removing the existing test DOIs already present in that
+ file). For example, you will have a line which reads "article1,10.1224/mydoi/3", where the DOI
+ is the one from your article.
+ - Re-zip the multi_journals.zip package. This will be the file you upload in the script below (a scripted
+ sketch of this re-packaging step follows this test file)
+ steps:
+ - step: Ensure that the archive package has been created with the correct structure
+ - step: Ensure that the archive package contains more than one article, from different journals.
+ A sample zip file is available for testing purposes. If there are no articles with the DOIs specified in the identifiers.csv
+ file in your DOAJ instance, the results may not be as expected. In that case, unzip the file and update the
+ identifiers.csv file with appropriate identifiers for the articles.
+ resource: /preservation_upload_test_package/multi_journals.zip
+ - step: Go to preservation area /publisher/preservation
+ - step: Select 'Browse' and select the package zip file
+ - step: click 'Upload'
+ results:
+ - 'A flash message appears at the top of the screen: File uploaded and waiting
+ to be processed.'
+ - Upload status is shown in 'History of uploads' with status processing
+ - step: wait a short amount of time for the job to process, then reload the page
+ (do not re-submit the form data). If the job remains in "pending", reload the
+ page until the status changes.
+ results:
+ - Upload status in 'History of uploads' changes to 'successfully processed' and
+ the 'Notes' shows as 'uploaded to archive server'
+ - The 'Notes' has the label 'UPLOADED PACKAGES'
+ - Under the label 'UPLOADED PACKAGES', details of the packages that were uploaded are displayed in the format <journal ISSN>_<created timestamp>.tar.gz
- title: Test maintenance mode for 'Upload preservation file' tab
context:
role: publisher
@@ -82,4 +122,4 @@ tests:
- step: Set the value of 'PRESERVATION_PAGE_UNDER_MAINTENANCE' to True in the configuration file (app.cfg/dev.cfg/test.cfg)
- step: Go to preservation area /publisher/preservation
results:
- - Maintenance page should be displayed with the content as 'This page is currently offline for maintenance'
\ No newline at end of file
+ - Maintenance page should be displayed with the content as 'This page is currently offline for maintenance'
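
A hypothetical helper that scripts the multi-journal setup steps above. The identifiers.csv name and the "article1,10.1224/mydoi/3" row format are taken from the test script; the file layout and everything else are illustrative assumptions:

    # Hypothetical convenience script: swap your own DOIs into identifiers.csv
    # and re-zip the package for upload at /publisher/preservation.
    import csv
    import shutil
    import zipfile
    from pathlib import Path


    def rezip_with_dois(package, dois):
        # dois maps article directory names (e.g. "article1") to your DOIs
        workdir = Path("multi_journals_work")
        with zipfile.ZipFile(package) as zf:
            zf.extractall(workdir)

        csv_path = workdir / "identifiers.csv"  # assumed to sit at the package root
        rows = list(csv.reader(csv_path.open()))
        rows = [[name, dois.get(name, doi)] for name, doi in rows]
        with csv_path.open("w", newline="") as f:
            csv.writer(f).writerows(rows)

        # Produces multi_journals_edited.zip with the workdir contents at its root
        shutil.make_archive("multi_journals_edited", "zip", workdir)


    rezip_with_dois("multi_journals.zip", {"article1": "10.1224/mydoi/3"})
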
diff --git a/doajtest/testbook/journal_form/associate_form.yml b/doajtest/testbook/journal_form/associate_form.yml
index cd333cbc2d..8a9e68b11c 100644
--- a/doajtest/testbook/journal_form/associate_form.yml
+++ b/doajtest/testbook/journal_form/associate_form.yml
@@ -70,4 +70,10 @@ tests:
- step: Attempt to click the "Remove" button
results:
- You are unable to delete the note
+ - step: Click "copy" button next to one of the fields (eg. Title)
+ results:
+ - Confirmation with fields value is displayed for 3 seconds
+ - step: Attempt to paste the value (use separate editor)
+ results:
+ - Correct value is pasted
diff --git a/doajtest/testbook/journal_form/editor_form.yml b/doajtest/testbook/journal_form/editor_form.yml
index 16b78e2c77..747bd3f81d 100644
--- a/doajtest/testbook/journal_form/editor_form.yml
+++ b/doajtest/testbook/journal_form/editor_form.yml
@@ -80,3 +80,9 @@ tests:
- step: Attempt to click the "Remove" button
results:
- You are unable to delete the note
+ - step: Click "copy" button next to one of the fields (eg. Title)
+ results:
+ - Confirmation with fields value is displayed for 3 seconds
+ - step: Attempt to paste the value (use separate editor)
+ results:
+ - Correct value is pasted
diff --git a/doajtest/testbook/journal_form/maned_form.yml b/doajtest/testbook/journal_form/maned_form.yml
index 0a05f70530..935fe4504f 100644
--- a/doajtest/testbook/journal_form/maned_form.yml
+++ b/doajtest/testbook/journal_form/maned_form.yml
@@ -120,3 +120,9 @@ tests:
- step: Attempt to click the "Remove" button
results:
- You are unable to delete the note
+ - step: Click "copy" button next to one of the fields (eg. Title)
+ results:
+ - Confirmation with fields value is displayed for 3 seconds
+ - step: Attempt to paste the value (use separate editor)
+ results:
+ - Correct value is pasted
diff --git a/doajtest/testbook/new_application_form/associate_editor_form.yml b/doajtest/testbook/new_application_form/associate_editor_form.yml
index 366fac92c4..f9f9fd4619 100644
--- a/doajtest/testbook/new_application_form/associate_editor_form.yml
+++ b/doajtest/testbook/new_application_form/associate_editor_form.yml
@@ -63,3 +63,9 @@ tests:
- step: Attempt to click the "Remove" button
results:
- You are unable to delete the note
+ - step: Click "copy" button next to one of the fields (eg. Title)
+ results:
+ - Confirmation with fields value is displayed for 3 seconds
+ - step: Attempt to paste the value (use separate editor)
+ results:
+ - Correct value is pasted
diff --git a/doajtest/testbook/new_application_form/editor_form.yml b/doajtest/testbook/new_application_form/editor_form.yml
index cd9b8edf3d..b9db0066f7 100644
--- a/doajtest/testbook/new_application_form/editor_form.yml
+++ b/doajtest/testbook/new_application_form/editor_form.yml
@@ -64,4 +64,10 @@ tests:
- you are unable to edit the note
- step: Attempt to click the "Remove" button
results:
- - You are unable to delete the note
\ No newline at end of file
+ - You are unable to delete the note
+ - step: Click "copy" button next to one of the fields (eg. Title)
+ results:
+ - Confirmation with fields value is displayed for 3 seconds
+ - step: Attempt to paste the value (use separate editor)
+ results:
+ - Correct value is pasted
\ No newline at end of file
diff --git a/doajtest/testbook/new_application_form/maned_form.yml b/doajtest/testbook/new_application_form/maned_form.yml
index 907791691b..98dc0211f6 100644
--- a/doajtest/testbook/new_application_form/maned_form.yml
+++ b/doajtest/testbook/new_application_form/maned_form.yml
@@ -95,3 +95,9 @@ tests:
- step: Attempt to click the "Remove" button
results:
- You are unable to delete the note
+ - step: Click "copy" button next to one of the fields (eg. Title)
+ results:
+ - Confirmation with fields value is displayed for 3 seconds
+ - step: Attempt to paste the value (use separate editor)
+ results:
+ - Correct value is pasted
diff --git a/doajtest/testbook/public_site/home_page.yml b/doajtest/testbook/public_site/home_page.yml
index c2261786cc..625df716f7 100644
--- a/doajtest/testbook/public_site/home_page.yml
+++ b/doajtest/testbook/public_site/home_page.yml
@@ -120,3 +120,22 @@ tests:
bottom right-hand corner.
results:
- You are returned to the top of the home page
+- title: Skip to main content button (Accessibility)
+ context:
+ role: anonymous
+ steps:
+ - step: Refresh the page
+ - step: Press the Tab key on the keyboard once
+ results:
+ - The 'Skip to main content' button becomes visible and receives focus
+ - step: Press Enter
+ results:
+ - Focus is moved to the main content
+ - step: Turn on a screen reader
+ - step: Using the keyboard, navigate to the 'Skip to main content' button
+ results:
+ - The screen reader reads the button title
+ - step: Press Enter
+ results:
+ - Focus is moved to the main content
+
diff --git a/doajtest/testbook/public_site/public_search.yml b/doajtest/testbook/public_site/public_search.yml
index 1bce101f8c..6b47834e85 100644
--- a/doajtest/testbook/public_site/public_search.yml
+++ b/doajtest/testbook/public_site/public_search.yml
@@ -166,3 +166,25 @@ tests:
results:
- You are taken to the full text of this article on the Web. It opens in a new
tab
+- title: 'Test Public Search Results Display: Accessibility'
+ context:
+ role: anonymous
+ steps:
+ - step: Go to the DOAJ search page at /search/articles
+ - step: Turn on a screen reader
+ results:
+ - Extendable facets are focusable and focus is marked with a solid orange border
+ - The screen reader announces the header's role ("button")
+ - The screen reader announces the state of the facet ("extended" or "folded")
+ - step: Press the spacebar to fold/unfold the facet
+ results:
+ - The screen reader announces the correct state of the facet ("extended" or "folded")
+ - step: Press the Tab key
+ results:
+ - Focus is on the list of checkboxes
+ - Focus is clearly marked by the outline
+ - step: Press the spacebar to check the filter
+ results:
+ - The filter is applied
+
diff --git a/doajtest/unit/application_processors/test_application_processor_emails.py b/doajtest/unit/application_processors/test_application_processor_emails.py
index 15df1fd727..036c86c68a 100644
--- a/doajtest/unit/application_processors/test_application_processor_emails.py
+++ b/doajtest/unit/application_processors/test_application_processor_emails.py
@@ -65,13 +65,14 @@ def editor_account_pull(self, _id):
ACTUAL_ACCOUNT_PULL = models.Account.pull
# A regex string for searching the log entries
-email_log_regex = 'template.*%s.*to:\[u{0,1}\'%s.*subject:.*%s'
+email_log_regex = r'template.*%s.*to:\[u{0,1}\'%s.*subject:.*%s'
# A string present in each email log entry (for counting them)
email_count_string = 'Email template'
NOTIFICATIONS_INTERCEPT = []
+
class TestPublicApplicationEmails(DoajTestCase):
def setUp(self):
super(TestPublicApplicationEmails, self).setUp()
diff --git a/doajtest/unit/resources/harvester_resp.json b/doajtest/unit/resources/harvester_resp.json
index dc24cb7dd9..133fedaf24 100644
--- a/doajtest/unit/resources/harvester_resp.json
+++ b/doajtest/unit/resources/harvester_resp.json
@@ -45,8 +45,8 @@
"journal": {
"title": "My Journal",
"medlineAbbreviation": "My Jour",
- "essn": "1234-5678",
- "issn": "9876-5432",
+ "issn": "1234-5678",
+ "essn": "9876-5432",
"isoabbreviation": "My Jour",
"nlmid": "123456789"
}
@@ -143,8 +143,8 @@
"journal": {
"title": "My Journal",
"medlineAbbreviation": "My Jour",
- "essn": "1234-5678",
- "issn": "9876-5432",
+ "issn": "1234-5678",
+ "essn": "9876-5432",
"isoabbreviation": "My Jour",
"nlmid": "123456789"
}
diff --git a/doajtest/unit/resources/invalid_article.zip b/doajtest/unit/resources/invalid_article.zip
new file mode 120000
index 0000000000..e955a12f14
--- /dev/null
+++ b/doajtest/unit/resources/invalid_article.zip
@@ -0,0 +1 @@
+../../preservation_upload_test_package/invalid_article.zip
\ No newline at end of file
diff --git a/doajtest/unit/resources/multi_journals.zip b/doajtest/unit/resources/multi_journals.zip
new file mode 120000
index 0000000000..ac64455e26
--- /dev/null
+++ b/doajtest/unit/resources/multi_journals.zip
@@ -0,0 +1 @@
+../../preservation_upload_test_package/multi_journals.zip
\ No newline at end of file
diff --git a/doajtest/unit/resources/preservation_multiple_journals.zip b/doajtest/unit/resources/preservation_multiple_journals.zip
new file mode 100644
index 0000000000..a55b9decd2
Binary files /dev/null and b/doajtest/unit/resources/preservation_multiple_journals.zip differ
diff --git a/doajtest/unit/resources/valid_article.zip b/doajtest/unit/resources/valid_article.zip
new file mode 120000
index 0000000000..ebf17a3099
--- /dev/null
+++ b/doajtest/unit/resources/valid_article.zip
@@ -0,0 +1 @@
+../../preservation_upload_test_package/valid_article.zip
\ No newline at end of file
diff --git a/doajtest/unit/test_article_acceptable_and_permissions.py b/doajtest/unit/test_article_acceptable_and_permissions.py
index eb4c04d4fb..5e0328635f 100644
--- a/doajtest/unit/test_article_acceptable_and_permissions.py
+++ b/doajtest/unit/test_article_acceptable_and_permissions.py
@@ -14,6 +14,11 @@ def is_acceptable_load_cases():
"test_id",
{"test_id": []})
+def issn_validation_against_journal_load_sets():
+ return load_parameter_sets(rel2abs(__file__, "..", "matrices", "article_create_article"), "issn_validation_against_journal",
+ "test_id",
+ {"test_id": []})
+
class TestBLLPrepareUpdatePublisher(DoajTestCase):
@@ -110,4 +115,73 @@ def test_has_permissions(self):
assert failed_result["unowned"].sort() == [pissn, eissn].sort()
# assert failed_result == {'success': 0, 'fail': 1, 'update': 0, 'new': 0, 'shared': [],
# 'unowned': [pissn, eissn],
- # 'unmatched': []}, "received: {}".format(failed_result)
\ No newline at end of file
+ # 'unmatched': []}, "received: {}".format(failed_result)
+
+
+ @parameterized.expand(issn_validation_against_journal_load_sets)
+ def test_issn_validation_against_journal_load_sets(self, value, kwargs):
+ kwpissn = kwargs.get("pissn")
+ kweissn = kwargs.get("eissn")
+ validated = kwargs.get("validated")
+
+ js = JournalFixtureFactory.make_many_journal_sources(2)
+ journal_in_doaj = Journal(**js[0])
+ journal_in_doaj.set_in_doaj(True)
+ journal_in_doaj.bibjson().pissn = "1111-1111"
+ journal_in_doaj.bibjson().eissn = "2222-2222"
+ journal_in_doaj.save(blocking=True)
+
+ journal_not_in_doaj = Journal(**js[1])
+ journal_not_in_doaj.set_in_doaj(False)
+ journal_not_in_doaj.bibjson().pissn = "3333-3333"
+ journal_not_in_doaj.bibjson().eissn = "4444-4444"
+ journal_not_in_doaj.save(blocking=True)
+
+ if kwpissn == "pissn_in_doaj":
+ pissn = journal_in_doaj.bibjson().pissn
+ elif kwpissn == "eissn_in_doaj":
+ pissn = journal_in_doaj.bibjson().eissn
+ elif kwpissn == "pissn_not_in_doaj":
+ pissn = journal_not_in_doaj.bibjson().pissn
+ else:
+ pissn = journal_not_in_doaj.bibjson().eissn
+
+ if kweissn == "pissn_in_doaj":
+ eissn = journal_in_doaj.bibjson().pissn
+ elif kweissn == "eissn_in_doaj":
+ eissn = journal_in_doaj.bibjson().eissn
+ elif kweissn == "pissn_not_in_doaj":
+ eissn = journal_not_in_doaj.bibjson().pissn
+ else:
+ eissn = journal_not_in_doaj.bibjson().eissn
+
+
+ art_source = ArticleFixtureFactory.make_article_source(pissn=pissn, eissn=eissn)
+ article = Article(**art_source)
+
+ if validated:
+ self.assertIsNone(self.svc.is_acceptable(article))
+
+ else:
+ with self.assertRaises(exceptions.ArticleNotAcceptable):
+ self.svc.is_acceptable(article)
+
+ def test_check_validation_for_2_journals(self):
+
+ js = JournalFixtureFactory.make_many_journal_sources(2, in_doaj=True)
+ journal_in_doaj = Journal(**js[0])
+ journal_in_doaj.bibjson().pissn = "1111-1111"
+ journal_in_doaj.bibjson().eissn = "2222-2222"
+ journal_in_doaj.save(blocking=True)
+
+ journal_in_doaj_2 = Journal(**js[1])  # also in DOAJ - the article must not match two in_doaj journals
+ journal_in_doaj_2.bibjson().pissn = "3333-3333"
+ journal_in_doaj_2.bibjson().eissn = "4444-4444"
+ journal_in_doaj_2.save(blocking=True)
+
+
+ art_source = ArticleFixtureFactory.make_article_source(pissn="1111-1111", eissn="4444-4444")
+ article = Article(**art_source)
+
+ with self.assertRaises(exceptions.ArticleNotAcceptable):
+ self.svc.is_acceptable(article)
\ No newline at end of file
diff --git a/doajtest/unit/test_bll_article_batch_create_article.py b/doajtest/unit/test_bll_article_batch_create_article.py
index 6cda9ee82c..34f537c7a8 100644
--- a/doajtest/unit/test_bll_article_batch_create_article.py
+++ b/doajtest/unit/test_bll_article_batch_create_article.py
@@ -5,7 +5,7 @@
from doajtest.helpers import DoajTestCase
from portality.bll import DOAJ
from portality.bll import exceptions
-from portality.models import Article, Account,Journal
+from portality.models import Article, Account, Journal
from portality.lib.paths import rel2abs
from doajtest.mocks.bll_article import BLLArticleMockFactory
from doajtest.mocks.model_Article import ModelArticleMockFactory
@@ -37,12 +37,14 @@ def setUp(self):
self._get_duplicate = self.svc.get_duplicate
self._issn_ownership_status = self.svc.issn_ownership_status
self._get_journal = Article.get_journal
+ self._find_by_issn_exact = Journal.find_by_issn_exact
def tearDown(self):
self.svc.is_legitimate_owner = self._is_legitimate_owner
self.svc.get_duplicate = self._get_duplicate
self.svc.issn_ownership_status = self._issn_ownership_status
Article.get_journal = self._get_journal
+ Journal.find_by_issn_exact = self._find_by_issn_exact
super(TestBLLArticleBatchCreateArticle, self).tearDown()
@parameterized.expand(load_cases)
@@ -118,8 +120,8 @@ def test_01_batch_create_article(self, name, kwargs):
article = Article(**source)
article.set_id()
articles.append(article)
- if add_journal_info:
- journal_specs.append({"title" : "0", "pissn" : "0000-0000", "eissn" : "0000-0001"})
+ # We always need a journal to exist for an article to be created
+ journal_specs.append({"title" : "0", "pissn" : "0000-0000", "eissn" : "0000-0001"})
# another with a DOI and no fulltext
source = ArticleFixtureFactory.make_article_source(
@@ -132,8 +134,7 @@ def test_01_batch_create_article(self, name, kwargs):
article = Article(**source)
article.set_id()
articles.append(article)
- if add_journal_info:
- journal_specs.append({"title" : "1", "pissn" : "1111-1112", "eissn" : "1111-1111"})
+ journal_specs.append({"title" : "1", "pissn" : "1111-1112", "eissn" : "1111-1111"})
# one with a fulltext and no DOI
source = ArticleFixtureFactory.make_article_source(
@@ -146,8 +147,7 @@ def test_01_batch_create_article(self, name, kwargs):
article = Article(**source)
article.set_id()
articles.append(article)
- if add_journal_info:
- journal_specs.append({"title" : "2", "pissn" : "2222-2222", "eissn" : "2222-2223"})
+ journal_specs.append({"title" : "2", "pissn" : "2222-2222", "eissn" : "2222-2223"})
# another one with a fulltext and no DOI
source = ArticleFixtureFactory.make_article_source(
@@ -160,8 +160,7 @@ def test_01_batch_create_article(self, name, kwargs):
article = Article(**source)
article.set_id()
articles.append(article)
- if add_journal_info:
- journal_specs.append({"title" : "3", "pissn" : "3333-3333", "eissn" : "3333-3334"})
+ journal_specs.append({"title" : "3", "pissn" : "3333-3333", "eissn" : "3333-3334"})
last_issn = "3333-3333"
last_doi = "10.123/abc/1"
@@ -180,8 +179,7 @@ def test_01_batch_create_article(self, name, kwargs):
article = Article(**source)
article.set_id()
articles.append(article)
- if add_journal_info:
- journal_specs.append({"title" : "4", "pissn" : "4444-4444", "eissn" : "4444-4445"})
+ journal_specs.append({"title" : "4", "pissn" : "4444-4444", "eissn" : "4444-4445"})
# one with a duplicated Fulltext
source = ArticleFixtureFactory.make_article_source(
@@ -194,8 +192,7 @@ def test_01_batch_create_article(self, name, kwargs):
article = Article(**source)
article.set_id()
articles.append(article)
- if add_journal_info:
- journal_specs.append({"title" : "5", "pissn" : "5555-5555", "eissn" : "5555-5556"})
+ journal_specs.append({"title" : "5", "pissn" : "5555-5555", "eissn" : "5555-5556"})
ilo_mock = None
if account_arg == "owner":
@@ -224,6 +221,18 @@ def test_01_batch_create_article(self, name, kwargs):
gj_mock = ModelArticleMockFactory.get_journal(journal_specs, in_doaj=journal_in_doaj)
Article.get_journal = gj_mock
+ # We need the journal to be in the index for the ArticleAcceptable checks FIXME: too slow, mock this
+ #[Journal(**js['instance']).save(blocking=True) for js in journal_specs]
+
+ # We need to retrieve the correct Journal by its ISSNs
+ def mock_find(issns: list, in_doaj=None, max=2):
+ for j in journal_specs:
+ if sorted([j['eissn'], j['pissn']]) == sorted(issns):
+ return [j['instance']]
+ return []
+
+ Journal.find_by_issn_exact = mock_find
+
###########################################################
# Execution
diff --git a/doajtest/unit/test_bll_article_create_article.py b/doajtest/unit/test_bll_article_create_article.py
index f595a1b96e..d9d524efe7 100644
--- a/doajtest/unit/test_bll_article_create_article.py
+++ b/doajtest/unit/test_bll_article_create_article.py
@@ -35,7 +35,6 @@ def setUp(self):
self.prepare_update_admin = self.svc._prepare_update_admin
self.prepare_update_publisher = self.svc._prepare_update_publisher
-
def tearDown(self):
super(TestBLLArticleCreateArticle, self).tearDown()
diff --git a/doajtest/unit/test_formrender.py b/doajtest/unit/test_formrender.py
index 1d655420af..f57ee83a61 100644
--- a/doajtest/unit/test_formrender.py
+++ b/doajtest/unit/test_formrender.py
@@ -6,11 +6,16 @@
# Form context for basic test
################################################################
+
class TestForm(Form):
+ __test__ = False # Prevent collection by PyTest
one = StringField("One")
two = StringField("Two")
+
class TestRenderer(Renderer):
+ __test__ = False # Prevent collection by PyTest
+
def __init__(self):
super(TestRenderer, self).__init__()
self.FIELD_GROUPS = {
@@ -20,7 +25,10 @@ def __init__(self):
]
}
+
class TestContext(FormContext):
+ __test__ = False # Prevent collection by PyTest
+
def data2form(self):
self.form = TestForm(formdata=self.form_data)
diff --git a/doajtest/unit/test_models.py b/doajtest/unit/test_models.py
index 5551cdcf5d..06175e6d76 100644
--- a/doajtest/unit/test_models.py
+++ b/doajtest/unit/test_models.py
@@ -1661,3 +1661,30 @@ def test_get_name_safe(self):
# account does not exist
assert models.Account.get_name_safe('not existing account id') == ''
+ def test_11_find_by_issn(self):
+ js = JournalFixtureFactory.make_many_journal_sources(2, in_doaj=True)
+ j1 = models.Journal(**js[0])
+ j1.bibjson().pissn = "1111-1111"
+ j1.bibjson().eissn = "2222-2222"
+ j1.save(blocking=True)
+
+ j2 = models.Journal(**js[1])
+ j2.bibjson().pissn = "3333-3333"
+ j2.bibjson().eissn = "4444-4444"
+ j2.save(blocking=True)
+
+ journals = models.Journal.find_by_issn(["1111-1111", "2222-2222"], True)
+ assert len(journals) == 1
+ assert journals[0].id == j1.id
+
+ journals = models.Journal.find_by_issn(["1111-1111", "3333-3333"], True)
+ assert len(journals) == 2
+ assert journals[0].id == j1.id
+ assert journals[1].id == j2.id
+
+ journals = models.Journal.find_by_issn_exact(["1111-1111", "2222-2222"], True)
+ assert len(journals) == 1
+ assert journals[0].id == j1.id
+
+ journals = models.Journal.find_by_issn_exact(["1111-1111", "3333-3333"], True)
+ assert len(journals) == 0
\ No newline at end of file
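
The last four assertions pin down the difference between the two lookups: find_by_issn matches a journal on any of the supplied ISSNs, while the new find_by_issn_exact returns a journal only when the supplied ISSNs all belong to that one journal. A reference sketch of the expected behaviour (not the actual query implementation):

    # Sketch of the semantics the assertions above require.
    from portality import models

    def find_by_issn_exact_sketch(issns, in_doaj=None):
        # Every supplied ISSN must belong to the same single journal,
        # otherwise nothing matches at all.
        matches = [j for j in models.Journal.find_by_issn(issns, in_doaj)
                   if set(issns) <= set(j.bibjson().issns())]
        return matches if len(matches) == 1 else []
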
diff --git a/doajtest/unit/test_oaipmh.py b/doajtest/unit/test_oaipmh.py
index b65d319bd0..bab8102499 100644
--- a/doajtest/unit/test_oaipmh.py
+++ b/doajtest/unit/test_oaipmh.py
@@ -245,7 +245,7 @@ def test_06_identify(self):
records = t.xpath('/oai:OAI-PMH/oai:Identify', namespaces=self.oai_ns)
assert len(records) == 1
assert records[0].xpath('//oai:repositoryName', namespaces=self.oai_ns)[0].text == 'Directory of Open Access Journals'
- assert records[0].xpath('//oai:adminEmail', namespaces=self.oai_ns)[0].text == 'sysadmin@cottagelabs.com'
+ assert records[0].xpath('//oai:adminEmail', namespaces=self.oai_ns)[0].text == 'helpdesk+oai@doaj.org'
assert records[0].xpath('//oai:granularity', namespaces=self.oai_ns)[0].text == 'YYYY-MM-DDThh:mm:ssZ'
def test_07_bad_verb(self):
diff --git a/doajtest/unit/test_task_preservation.py b/doajtest/unit/test_task_preservation.py
index 29c170f742..d8d62d87e8 100644
--- a/doajtest/unit/test_task_preservation.py
+++ b/doajtest/unit/test_task_preservation.py
@@ -12,34 +12,80 @@
from portality.models.article import Article
-class TestPreservation(DoajTestCase):
+def mock_pull_by_key(key, value):
+ if value == "http://link.springer.com/article/10.1186/s40478-018-0619-9":
+ article = Article()
+ article.data = PreservationMock.ARTICLE_DATA
+ return article
+ elif value == "https://www.frontiersin.org/articles/10.3389/fcosc.2022.1028295":
+ article = Article()
+ article.data = PreservationMock.ARTICLE_DATA_JOURNAL2
+ return article
- def setUp(self):
- super(TestPreservation, self).setUp()
- articles_zip_path = test_constants.PATH_RESOURCES / "articles.zip"
+
+def mock_requests_post(*args, **kwargs):
+ class MockResponse:
+ def __init__(self, json_data, status_code):
+ self.json_data = json_data
+ self.status_code = status_code
+
+ def json(self):
+ return self.json_data
+
+ if args[0] is not None and kwargs["data"]["org"] == "DOAJ":
+ return MockResponse({
+ "files": [
+ {
+ "name": "name_of_tarball.tar.gz",
+ "sha256": "decafbad"
+ }
+ ]
+ }, 200)
+
+ return MockResponse(None, 404)
+
+
+def mock_owner_of_article(*args, **kwargs):
+ return True
+
+
+class TestPreservationSetup(DoajTestCase):
+
+ def initial_setup(self, package_name):
+ super(TestPreservationSetup, self).setUp()
+ articles_zip_path = test_constants.PATH_RESOURCES / package_name
with open(articles_zip_path, 'rb') as zf:
- self.zip_file = FileStorage(BytesIO(zf.read()), filename="articles.zip")
+ self.zip_file = FileStorage(BytesIO(zf.read()), filename=package_name)
self.upload_dir = app.config.get("UPLOAD_DIR", ".")
created_time = dates.now_str("%Y-%m-%d-%H-%M-%S")
- owner = "rama"
- dir_name = owner + "-" + created_time
+ self.owner = "rama"
+ self.journal_dir = "2051-5960"
+ dir_name = self.owner + "-" + created_time
self.local_dir = os.path.join(preservation.Preservation.UPLOAD_DIR, dir_name)
- self.preserve = preservation.Preservation(self.local_dir, owner)
- self.package = preservation.PreservationPackage(self.preserve.preservation_dir, owner)
- self.local_dir = os.path.join(self.local_dir,"tmp")
+ self.preserve = preservation.Preservation(self.local_dir, self.owner)
+ self.tmp_dir = os.path.join(self.local_dir, "tmp")
self.preservation_collection = app.config.get("PRESERVATION_COLLECTION")
- app.config["PRESERVATION_COLLECTION"] = {"rama":["test","2"]}
+ app.config["PRESERVATION_COLLECTION"] = {"rama": ["test", "2"]}
def tearDown(self):
- super(TestPreservation, self).tearDown()
+ super(TestPreservationSetup, self).tearDown()
preservation.Preservation.delete_local_directory(self.local_dir)
app.config["PRESERVATION_COLLECTION"] = self.preservation_collection
+
+class TestPreservation(TestPreservationSetup):
+
+ def setUp(self):
+ super(TestPreservation, self).initial_setup("articles.zip")
+
+ def tearDown(self):
+ super(TestPreservation, self).tearDown()
+
def test_local_directory(self):
- #Test creation of local directory
- #TestPreservation.preserve.create_local_directories()
+ # Test creation of local directory
+ # TestPreservation.preserve.create_local_directories()
job = preservation.PreservationBackgroundTask.prepare("rama", upload_file=self.zip_file)
params = job.params
local_dir = params["preserve__local_dir"]
@@ -48,81 +94,137 @@ def test_local_directory(self):
assert os.path.isdir(os.path.join(self.upload_dir, dir_name))
assert os.path.isdir(os.path.join(self.upload_dir, dir_name,dir_name))
- #Test deletion of local directory
+ # Test deletion of local directory
preservation.Preservation.delete_local_directory(local_dir)
assert not os.path.exists(os.path.join(self.upload_dir, dir_name))
- def mock_pull_by_key(key, value):
- article = Article()
- article.data = PreservationMock.ARTICLE_DATA
- return article
+ @patch.object(Article, 'pull_by_key', mock_pull_by_key)
+ @patch.object(requests, "post", mock_requests_post)
+ @patch.object(preservation.Preservation, 'owner_of_article', mock_owner_of_article)
+ def test_preservation(self):
+ self.preserve.save_file(self.zip_file)
+
+ assert os.path.exists(os.path.join(self.tmp_dir, self.zip_file.filename))
+
+ # Test extraction of zip file
+ self.preserve.extract_zip_file()
+
+ assert os.path.exists(os.path.join(self.tmp_dir, "articles"))
+ assert os.path.isdir(os.path.join(self.tmp_dir, "articles"))
+ assert os.path.isdir(os.path.join(self.tmp_dir, "articles", "article_1"))
+ assert os.path.exists(os.path.join(self.tmp_dir, "articles",
+ "article_1", "identifier.txt"))
+
+ reader = preservation.CSVReader(os.path.join(self.tmp_dir,
+ "articles", "identifiers.csv"))
+ data = reader.articles_info()
+
+ assert "article_1" in data
+ assert "article/10.1186/s40478-018-0619-9" in data["article_1"][0]
+
+ # Test package structure
+ self.preserve.create_package_structure()
+ package_dir = os.path.join(self.upload_dir,
+ self.preserve.dir_name, self.preserve.dir_name, self.journal_dir)
+ tag_manifest_file = os.path.join(package_dir, "00003741594643f4996e2555a01e03c7", "tagmanifest-sha256.txt")
+ manifest_file = os.path.join(package_dir, "00003741594643f4996e2555a01e03c7", "manifest-sha256.txt")
+ assert os.path.exists(package_dir)
+ assert os.path.exists(tag_manifest_file)
+ assert os.path.exists(manifest_file)
+
+ package = preservation.PreservationPackage(self.preserve.preservation_dir, self.journal_dir, self.owner)
+
+ # Test creation of tar file
+ package.create_package()
+ tar_file = package_dir + "_" + package.created_time + ".tar.gz"
+ assert os.path.exists(tar_file)
+
+ sha256 = package.sha256(tar_file)
+ response = package.upload_package(sha256, tar_file)
+ assert response.status_code == 200
+
+ def test_get_article_info(self):
+ issn, article_id, metadata_json = self.preserve.get_article_info(PreservationMock.ARTICLE_DATA)
- def mock_requests_post(*args, **kwargs):
- class MockResponse:
- def __init__(self, json_data, status_code):
- self.json_data = json_data
- self.status_code = status_code
+ assert issn == "2051-5960"
+ assert article_id == "00003741594643f4996e2555a01e03c7"
+ assert metadata_json["bibjson"]["identifier"][0]["id"] == "10.1186/s40478-018-0619-9"
- def json(self):
- return self.json_data
- if not args[0] == None and kwargs["data"]["org"] == "DOAJ":
- return MockResponse({
- "files": [
- {
- "name": "name_of_tarball.tar.gz",
- "sha256": "decafbad"
- }
- ]
- }, 200)
+class TestPreservationMultipleJournals(TestPreservationSetup):
- return MockResponse(None, 404)
+ def setUp(self):
+ super(TestPreservationMultipleJournals, self).initial_setup("preservation_multiple_journals.zip")
+ self.another_journal_dir = "2673-611X"
- def mock_owner_of_article(*args, **kwargs):
- return True
+ def tearDown(self):
+ super(TestPreservationMultipleJournals, self).tearDown()
@patch.object(Article, 'pull_by_key', mock_pull_by_key)
- @patch.object(requests,"post", mock_requests_post)
+ @patch.object(requests, "post", mock_requests_post)
@patch.object(preservation.Preservation, 'owner_of_article', mock_owner_of_article)
- def test_preservation(self):
+ def test_preservation_multiple_journals(self):
self.preserve.save_file(self.zip_file)
- assert os.path.exists(os.path.join(self.local_dir, self.zip_file.filename))
-
# Test extraction of zip file
self.preserve.extract_zip_file()
- assert os.path.exists(os.path.join(self.local_dir, "articles"))
- assert os.path.isdir(os.path.join(self.local_dir, "articles"))
- assert os.path.isdir(os.path.join(self.local_dir, "articles", "article_1"))
- assert os.path.exists(os.path.join(self.local_dir, "articles",
- "article_1", "identifier.txt"))
+ assert os.path.exists(os.path.join(self.tmp_dir, "articles"))
+ assert os.path.isdir(os.path.join(self.tmp_dir, "articles"))
+ assert os.path.isdir(os.path.join(self.tmp_dir, "articles", "article_1"))
+ assert os.path.exists(os.path.join(self.tmp_dir, "articles",
+ "article_1", "Identifier.txt"))
- reader = preservation.CSVReader(os.path.join(self.local_dir,
- "articles", "identifiers.csv"))
+ reader = preservation.CSVReader(os.path.join(self.tmp_dir,
+ "articles", "Identifiers.csv"))
data = reader.articles_info()
assert "article_1" in data
assert "article/10.1186/s40478-018-0619-9" in data["article_1"][0]
+ assert "article_2" in data
+ assert "10.3389/fcosc.2022.1028295" in data["article_2"][0]
+
# Test package structure
self.preserve.create_package_structure()
package_dir = os.path.join(self.upload_dir,
- self.preserve.dir_name, self.preserve.dir_name)
- tag_manifest_file = os.path.join(package_dir, "2051-5960", "00003741594643f4996e2555a01e03c7", "tagmanifest-sha256.txt")
- manifest_file = os.path.join(package_dir,"2051-5960", "00003741594643f4996e2555a01e03c7", "manifest-sha256.txt")
+ self.preserve.dir_name, self.preserve.dir_name, self.journal_dir)
+ tag_manifest_file = os.path.join(package_dir, "00003741594643f4996e2555a01e03c7", "tagmanifest-sha256.txt")
+ manifest_file = os.path.join(package_dir, "00003741594643f4996e2555a01e03c7", "manifest-sha256.txt")
assert os.path.exists(package_dir)
assert os.path.exists(tag_manifest_file)
assert os.path.exists(manifest_file)
+ package = preservation.PreservationPackage(self.preserve.preservation_dir, self.journal_dir, self.owner)
+
# Test creation of tar file
- self.package.create_package()
- assert os.path.exists(package_dir + ".tar.gz")
+ package.create_package()
+ tar_file = package_dir + "_" + package.created_time + ".tar.gz"
+ assert os.path.exists(tar_file)
- sha256 = self.package.sha256()
- response = self.package.upload_package(sha256)
+ sha256 = package.sha256(tar_file)
+ response = package.upload_package(sha256, tar_file)
assert response.status_code == 200
+ # Test another journal package
+ package_dir = os.path.join(self.upload_dir,
+ self.preserve.dir_name, self.preserve.dir_name, self.another_journal_dir)
+ tag_manifest_file = os.path.join(package_dir, "00005741594643f4996e2666a01e0310", "tagmanifest-sha256.txt")
+ manifest_file = os.path.join(package_dir, "00005741594643f4996e2666a01e0310", "manifest-sha256.txt")
+ assert os.path.exists(package_dir)
+ assert os.path.exists(tag_manifest_file)
+ assert os.path.exists(manifest_file)
+
+ package = preservation.PreservationPackage(self.preserve.preservation_dir, self.another_journal_dir, self.owner)
+
+ # Test creation of tar file for another journal
+ package.create_package()
+ tar_file = package_dir + "_" + package.created_time + ".tar.gz"
+ assert os.path.exists(tar_file)
+
+ sha256 = package.sha256(tar_file)
+ response = package.upload_package(sha256, tar_file)
+ assert response.status_code == 200
def test_get_article_info(self):
issn, article_id, metadata_json = self.preserve.get_article_info(PreservationMock.ARTICLE_DATA)
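
The restructured tests reflect the move to one preservation package per journal: PreservationPackage now takes the journal directory (named by ISSN) and the owner, and the tarball gains the creation timestamp. The naming rule, extracted from the assertions above:

    # Naming rule exercised by the tests above (illustrative):
    def expected_tar_path(package_dir, created_time):
        # e.g. .../rama-2024-01-01-12-00-00/2051-5960_2024-01-01-12-00-00.tar.gz
        return package_dir + "_" + created_time + ".tar.gz"
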
diff --git a/doajtest/unit/test_tasks_ingestCrossref442Articles.py b/doajtest/unit/test_tasks_ingestCrossref442Articles.py
index 2714b33644..ed2236552c 100644
--- a/doajtest/unit/test_tasks_ingestCrossref442Articles.py
+++ b/doajtest/unit/test_tasks_ingestCrossref442Articles.py
@@ -1315,11 +1315,11 @@ def test_40_crossref_2_journals_different_owners_issn_each_fail(self):
found = [a for a in models.Article.find_by_issns(["1234-5678", "9876-5432"])]
assert len(found) == 0
- def test_41_crossref_2_journals_same_owner_issn_each_success(self):
+ def test_41_crossref_2_journals_same_owner_issn_each_fail(self):
etree.XMLSchema = self.mock_load_schema
# Create 2 journals with the same owner, each with one different issn. The article's 2 issns
# match each of these issns
- # We expect a successful article ingest
+ # We expect a failed ingest - an article must match with only ONE journal
j1 = models.Journal()
j1.set_owner("testowner")
@@ -1365,19 +1365,19 @@ def test_41_crossref_2_journals_same_owner_issn_each_success(self):
fu = models.FileUpload.pull(id)
assert fu is not None
- assert fu.status == "processed"
- assert fu.imported == 1
+ assert fu.status == "failed"
+ assert fu.imported == 0
assert fu.updates == 0
- assert fu.new == 1
+ assert fu.new == 0
fr = fu.failure_reasons
+ assert len(fr) > 0
assert len(fr.get("shared", [])) == 0
assert len(fr.get("unowned", [])) == 0
- assert len(fr.get("unmatched", [])) == 0
+ assert len(fr.get("unmatched", [])) == 2
found = [a for a in models.Article.find_by_issns(["1234-5678", "9876-5432"])]
- assert len(found) == 1
-
+ assert len(found) == 0
def test_42_crossref_2_journals_different_owners_different_issns_mixed_article_fail(self):
etree.XMLSchema = self.mock_load_schema
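
Test 41's reversal encodes the matching rule introduced in the BLL changes below: an article's ISSN pair must resolve to exactly one journal in DOAJ, so two same-owner journals each holding one of the ISSNs now fail as unmatched. In sketch form, using the find_by_issn_exact signature seen in the unit tests above:

    # Sketch of the rule these ingest tests now encode (not the service's code).
    from portality import models

    def matches_exactly_one_journal(article):
        journals = models.Journal.find_by_issn_exact(article.bibjson().issns(), in_doaj=True)
        return len(journals) == 1
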
diff --git a/doajtest/unit/test_tasks_ingestCrossref531Articles.py b/doajtest/unit/test_tasks_ingestCrossref531Articles.py
index 27308a3d22..09edcf1b1d 100644
--- a/doajtest/unit/test_tasks_ingestCrossref531Articles.py
+++ b/doajtest/unit/test_tasks_ingestCrossref531Articles.py
@@ -624,7 +624,7 @@ def test_23_crossref_process_success(self):
j.set_owner("testowner")
bj = j.bibjson()
bj.add_identifier(bj.P_ISSN, "1234-5678")
- j.save()
+ j.save(blocking=True)
asource = AccountFixtureFactory.make_publisher_source()
account = models.Account(**asource)
@@ -634,6 +634,7 @@ def test_23_crossref_process_success(self):
# push an article to initialise the mappings
source = ArticleFixtureFactory.make_article_source()
article = models.Article(**source)
+ article.bibjson().add_identifier(bj.P_ISSN, "1234-5678")
article.save(blocking=True)
article.delete()
models.Article.blockdeleted(article.id)
diff --git a/doajtest/unit/test_tasks_ingestDOAJarticles.py b/doajtest/unit/test_tasks_ingestDOAJarticles.py
index 2872124a47..a2eb5f2be9 100644
--- a/doajtest/unit/test_tasks_ingestDOAJarticles.py
+++ b/doajtest/unit/test_tasks_ingestDOAJarticles.py
@@ -1260,10 +1260,10 @@ def test_40_doaj_2_journals_different_owners_issn_each_fail(self):
found = [a for a in models.Article.find_by_issns(["1234-5678", "9876-5432"])]
assert len(found) == 0
- def test_41_doaj_2_journals_same_owner_issn_each_success(self):
+ def test_41_doaj_2_journals_same_owner_issn_each_fail(self):
# Create 2 journals with the same owner, each with one different issn. The article's 2 issns
# match each of these issns
- # We expect a successful article ingest
+ # We expect a failed article ingest - articles must match only ONE journal
j1 = models.Journal()
j1.set_owner("testowner")
bj1 = j1.bibjson()
@@ -1301,18 +1301,18 @@ def test_41_doaj_2_journals_same_owner_issn_each_success(self):
fu = models.FileUpload.pull(id)
assert fu is not None
- assert fu.status == "processed"
- assert fu.imported == 1
+ assert fu.status == "failed"
+ assert fu.imported == 0
assert fu.updates == 0
- assert fu.new == 1
+ assert fu.new == 0
fr = fu.failure_reasons
assert len(fr.get("shared", [])) == 0
assert len(fr.get("unowned", [])) == 0
- assert len(fr.get("unmatched", [])) == 0
+ assert len(fr.get("unmatched", [])) == 2 # error message for each article
found = [a for a in models.Article.find_by_issns(["1234-5678", "9876-5432"])]
- assert len(found) == 1
+ assert len(found) == 0
def test_42_doaj_2_journals_different_owners_different_issns_mixed_article_fail(self):
# Create 2 different journals with different owners and different issns (2 each).
diff --git a/doajtest/unit/test_toc.py b/doajtest/unit/test_toc.py
index bfe6564d8c..dafe64ac31 100644
--- a/doajtest/unit/test_toc.py
+++ b/doajtest/unit/test_toc.py
@@ -1,16 +1,57 @@
-from doajtest.helpers import DoajTestCase
from doajtest.fixtures import ArticleFixtureFactory, JournalFixtureFactory
+from doajtest.helpers import DoajTestCase
+from portality import app as _app # noqa, make sure route is registered
from portality import models
+from portality.util import url_for
+
+
+def _test_toc_uses_both_issns_when_available(app_test, url_name):
+ j = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))
+ pissn = j.bibjson().first_pissn
+ eissn = j.bibjson().first_eissn
+ j.set_last_manual_update()
+ j.save(blocking=True)
+ a = models.Article(**ArticleFixtureFactory.make_article_source(pissn=pissn, eissn=eissn, in_doaj=True))
+ a.save(blocking=True)
+ with app_test.test_client() as t_client:
+ response = t_client.get(url_for(url_name, identifier=j.bibjson().get_preferred_issn()))
+ assert response.status_code == 200
+ assert pissn in response.data.decode()
+ assert eissn in response.data.decode()
+
+
+def _test_toc_correctly_uses_pissn(app_test, url_name):
+ j = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))
+ pissn = j.bibjson().first_pissn
+ # remove eissn
+ del j.bibjson().eissn
+ j.set_last_manual_update()
+ j.save(blocking=True)
+ a = models.Article(**ArticleFixtureFactory.make_article_source(pissn=pissn, in_doaj=True))
+ a.save(blocking=True)
+ with app_test.test_client() as t_client:
+ response = t_client.get(url_for(url_name, identifier=j.bibjson().get_preferred_issn()))
+ assert response.status_code == 200
+ assert pissn in response.data.decode()
+
+
+def _test_toc_correctly_uses_eissn(app_test, url_name):
+ j = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))
+ eissn = j.bibjson().first_eissn
+ # remove pissn
+ del j.bibjson().pissn
+ j.set_last_manual_update()
+ j.save(blocking=True)
+ a = models.Article(**ArticleFixtureFactory.make_article_source(pissn=eissn, in_doaj=True))
+ a.save(blocking=True)
+ with app_test.test_client() as t_client:
+ response = t_client.get(url_for(url_name, identifier=j.bibjson().get_preferred_issn()))
+ assert response.status_code == 200
+ assert eissn in response.data.decode()
class TestTOC(DoajTestCase):
- def setUp(self):
- super(TestTOC, self).setUp()
-
- def tearDown(self):
- super(TestTOC, self).tearDown()
-
def test_01_article_index_date_parsing(self):
""" The ToC date histogram needs an accurate datestamp in the article's index """
a = models.Article(**ArticleFixtureFactory.make_article_source())
@@ -43,9 +84,9 @@ def test_01_article_index_date_parsing(self):
d = a.bibjson().get_publication_date()
assert d == '2012-03-01T00:00:00Z'
- a.bibjson().year = '86' # beware: this test will give a false negative 70 years from
- a.bibjson().month = '11' # the time of writing; this gives adequate warning (24 years)
- d = a.bibjson().get_publication_date() # to fix hard-coding of centuries in get_publication_date().
+ a.bibjson().year = '86' # beware: this test will give a false negative 70 years from
+ a.bibjson().month = '11' # the time of writing; this gives adequate warning (24 years)
+ d = a.bibjson().get_publication_date() # to fix hard-coding of centuries in get_publication_date().
assert d == '1986-11-01T00:00:00Z'
# Check we can handle numeric months
@@ -90,45 +131,21 @@ def test_02_toc_requirements(self):
assert a.data['index']['date_toc_fv_month'] == a.data['index']['date'] == "1991-01-01T00:00:00Z"
def test_03_toc_uses_both_issns_when_available(self):
- j = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))
- pissn = j.bibjson().first_pissn
- eissn = j.bibjson().first_eissn
- j.set_last_manual_update()
- j.save(blocking=True)
- a = models.Article(**ArticleFixtureFactory.make_article_source(pissn=pissn, eissn=eissn, in_doaj=True))
- a.save(blocking=True)
- with self.app_test.test_client() as t_client:
- response = t_client.get('/toc/{}'.format(j.bibjson().get_preferred_issn()))
- assert response.status_code == 200
- assert pissn in response.data.decode()
- assert eissn in response.data.decode()
+ _test_toc_uses_both_issns_when_available(self.app_test, 'doaj.toc')
+
+ def test_04_toc_correctly_uses_pissn(self):
+ _test_toc_correctly_uses_pissn(self.app_test, 'doaj.toc')
+
+ def test_05_toc_correctly_uses_eissn(self):
+ _test_toc_correctly_uses_eissn(self.app_test, 'doaj.toc')
+
+
+class TestTOCArticles(DoajTestCase):
+ def test_03_toc_uses_both_issns_when_available(self):
+ _test_toc_uses_both_issns_when_available(self.app_test, 'doaj.toc_articles')
def test_04_toc_correctly_uses_pissn(self):
- j = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))
- pissn = j.bibjson().first_pissn
- # remove eissn
- del j.bibjson().eissn
-
- j.set_last_manual_update()
- j.save(blocking=True)
- a = models.Article(**ArticleFixtureFactory.make_article_source(pissn=pissn, in_doaj=True))
- a.save(blocking=True)
- with self.app_test.test_client() as t_client:
- response = t_client.get('/toc/{}'.format(j.bibjson().get_preferred_issn()))
- assert response.status_code == 200
- assert pissn in response.data.decode()
+ _test_toc_correctly_uses_pissn(self.app_test, 'doaj.toc_articles')
def test_05_toc_correctly_uses_eissn(self):
- j = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))
- eissn = j.bibjson().first_eissn
- # remove pissn
- del j.bibjson().pissn
-
- j.set_last_manual_update()
- j.save(blocking=True)
- a = models.Article(**ArticleFixtureFactory.make_article_source(pissn=eissn, in_doaj=True))
- a.save(blocking=True)
- with self.app_test.test_client() as t_client:
- response = t_client.get('/toc/{}'.format(j.bibjson().get_preferred_issn()))
- assert response.status_code == 200
- assert eissn in response.data.decode()
+ _test_toc_correctly_uses_eissn(self.app_test, 'doaj.toc_articles')
diff --git a/portality/bll/exceptions.py b/portality/bll/exceptions.py
index 3bb676f984..005ad7f31c 100644
--- a/portality/bll/exceptions.py
+++ b/portality/bll/exceptions.py
@@ -66,6 +66,7 @@ class ArticleNotAcceptable(Exception):
"""
def __init__(self, *args, **kwargs):
self.message = kwargs.get("message", "")
+ self.result = kwargs.get("result", {})
super(ArticleNotAcceptable, self).__init__(*args)
def __str__(self):
diff --git a/portality/bll/services/article.py b/portality/bll/services/article.py
index 7b55894d24..b5e829cd24 100644
--- a/portality/bll/services/article.py
+++ b/portality/bll/services/article.py
@@ -56,6 +56,9 @@ def batch_create_articles(self, articles, account, duplicate_check=True, merge_d
all_unowned = set()
all_unmatched = set()
+ # Hold on to the exception so we can raise it later
+ e_not_acceptable = None
+
for article in articles:
try:
# ~~!ArticleBatchCreate:Feature->ArticleCreate:Feature~~
@@ -67,6 +70,10 @@ def batch_create_articles(self, articles, account, duplicate_check=True, merge_d
dry_run=True)
except (exceptions.ArticleMergeConflict, exceptions.ConfigurationException):
raise exceptions.IngestException(message=Messages.EXCEPTION_ARTICLE_BATCH_CONFLICT)
+ except exceptions.ArticleNotAcceptable as e:
+                # ArticleNotAcceptable covers a superset of the reasons we can't match a journal to this article
+ e_not_acceptable = e
+ result = {'fail': 1, 'unmatched': set(article.bibjson().issns())}
success += result.get("success", 0)
fail += result.get("fail", 0)
@@ -90,6 +97,8 @@ def batch_create_articles(self, articles, account, duplicate_check=True, merge_d
# return some stats on the import
return report
else:
+ if e_not_acceptable is not None:
+ raise exceptions.ArticleNotAcceptable(message=e_not_acceptable.message, result=report)
raise exceptions.IngestException(message=Messages.EXCEPTION_ARTICLE_BATCH_FAIL, result=report)
@staticmethod
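In outline, the hunk above applies a defer-and-reraise pattern: the batch loop records the first ArticleNotAcceptable it sees, keeps tallying per-article results, and only raises once the whole batch report is assembled. A minimal standalone sketch of that pattern (illustrative names, not the DOAJ API):

```python
# Minimal sketch of defer-and-reraise over a batch (illustrative only)
class NotAcceptable(Exception):
    def __init__(self, message="", result=None):
        super().__init__(message)
        self.message = message
        self.result = result if result is not None else {}

def process_batch(items, check):
    deferred = None
    report = {"success": 0, "fail": 0}
    for item in items:
        try:
            check(item)
            report["success"] += 1
        except NotAcceptable as e:
            deferred = e              # remember the failure, keep counting
            report["fail"] += 1
    if deferred is not None:
        # re-raise with the complete batch report attached
        raise NotAcceptable(message=deferred.message, result=report)
    return report
```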
@@ -159,9 +168,6 @@ def _validate_issns(article_bibjson: models.ArticleBibJSON):
if len(pissn) > 1 or len(eissn) > 1:
raise exceptions.ArticleNotAcceptable(message=Messages.EXCEPTION_TOO_MANY_ISSNS)
- pissn = article_bibjson.get_one_identifier("pissn")
- eissn = article_bibjson.get_one_identifier("eissn")
-
# no pissn or eissn
if not pissn and not eissn:
raise exceptions.ArticleNotAcceptable(message=Messages.EXCEPTION_NO_ISSNS)
@@ -204,18 +210,18 @@ def create_article(self, article, account, duplicate_check=True, merge_duplicate
{"arg": update_article_id, "instance": str, "allow_none": True, "arg_name": "update_article_id"}
], exceptions.ArgumentException)
- # quickly validate that the article is acceptable - it must have a DOI and/or a fulltext
- # this raises an exception if the article is not acceptable, containing all the relevant validation details
+ has_permissions_result = self.has_permissions(account, article, limit_to_account)
+ if isinstance(has_permissions_result, dict):
+ return has_permissions_result
+ # Validate that the article is acceptable: it must have a DOI and/or a fulltext & match only one in_doaj journal
+ # this raises an exception if the article is not acceptable, containing all the relevant validation details
+ # We do this after the permissions check because that gives a detailed result whereas this throws an exception
try:
self.is_acceptable(article)
except Exception as e:
raise e
- has_permissions_result = self.has_permissions(account, article, limit_to_account)
- if isinstance(has_permissions_result,dict):
- return has_permissions_result
-
is_update = 0
if duplicate_check:
# ~~!ArticleCreate:Feature->ArticleDeduplication:Feature~~
@@ -252,7 +258,8 @@ def has_permissions(self, account, article, limit_to_account):
def is_acceptable(self, article: models.Article):
"""
conduct some deep validation on the article to make sure we will accept it
- or the moment, this just means making sure it has a DOI and a fulltext
+ this just means making sure it has a DOI and a fulltext, and that its ISSNs
+ match a single journal
"""
try:
bj = article.bibjson()
@@ -266,12 +273,40 @@ def is_acceptable(self, article: models.Article):
raise exceptions.ArticleNotAcceptable(message=Messages.EXCEPTION_NO_DOI_NO_FULLTEXT)
self._validate_issns(bj)
+ journal = self.match_journal_with_validation(bj)
# is journal in doaj (we do this check last as it has more performance impact)
- journal = article.get_journal()
if journal is None or not journal.is_in_doaj():
raise exceptions.ArticleNotAcceptable(message=Messages.EXCEPTION_ADDING_ARTICLE_TO_WITHDRAWN_JOURNAL)
+ @staticmethod
+ def match_journal_with_validation(article_bibjson: models.ArticleBibJSON):
+ pissn = article_bibjson.get_one_identifier("pissn")
+ eissn = article_bibjson.get_one_identifier("eissn")
+
+ issns = []
+
+ if pissn is not None:
+ issns.append(pissn)
+ if eissn is not None:
+ issns.append(eissn)
+
+ # Find an exact match, whether in_doaj or not
+ journal = models.Journal.find_by_issn_exact(issns)
+
+        # check that exactly one journal matches and that each ISSN appears in its correct field;
+        # if the pissn is in its correct field, the two ISSNs differ, and only one journal matched, the eissn must belong to that journal too
+ if len(journal) != 1:
+ raise exceptions.ArticleNotAcceptable(message=Messages.EXCEPTION_MISMATCHED_ISSNS)
+ if pissn is not None:
+ if journal[0].bibjson().pissn != pissn:
+ raise exceptions.ArticleNotAcceptable(message=Messages.EXCEPTION_MISMATCHED_ISSNS)
+ if eissn is not None:
+ if journal[0].bibjson().eissn != eissn:
+ raise exceptions.ArticleNotAcceptable(message=Messages.EXCEPTION_MISMATCHED_ISSNS)
+
+ return journal[0]
+
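A self-contained approximation of the matching rule implemented above, for clarity: accept only when exactly one journal matches all supplied ISSNs and each ISSN sits in its correct field. This is a sketch over plain dicts, not the model layer:

```python
# Sketch of the exact-match rule over plain dicts (not the model layer)
def match_with_validation(pissn, eissn, journals):
    """journals: iterable of dicts like {"pissn": ..., "eissn": ...}"""
    wanted = {i for i in (pissn, eissn) if i is not None}
    hits = [j for j in journals
            if wanted <= {j.get("pissn"), j.get("eissn")}]
    if len(hits) != 1:
        raise ValueError("ISSNs do not identify exactly one journal")
    match = hits[0]
    if pissn is not None and match.get("pissn") != pissn:
        raise ValueError("pissn matched the journal's eissn field")
    if eissn is not None and match.get("eissn") != eissn:
        raise ValueError("eissn matched the journal's pissn field")
    return match
```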
@staticmethod
def is_legitimate_owner(article, owner):
"""
@@ -369,6 +404,10 @@ def issn_ownership_status(article, owner):
issns = b.get_identifiers(b.P_ISSN)
issns += b.get_identifiers(b.E_ISSN)
+ # FIXME: Duplicate check due to inconsistent control flow (result vs exception)
+ if len(issns) == 0:
+ raise exceptions.ArticleNotAcceptable(message=Messages.EXCEPTION_NO_ISSNS)
+
owned = []
shared = []
unowned = []
diff --git a/portality/bll/services/background_task_status.py b/portality/bll/services/background_task_status.py
index 486fdb1d84..ae0c6b7908 100644
--- a/portality/bll/services/background_task_status.py
+++ b/portality/bll/services/background_task_status.py
@@ -95,7 +95,7 @@ def create_queues_status(self, queue_name) -> dict:
# prepare for err_msgs
limited_sec = app.config.get('BG_MONITOR_LAST_COMPLETED', {}).get(queue_name)
if limited_sec is None:
- app.logger.warn(f'BG_MONITOR_LAST_COMPLETED for {queue_name} not found ')
+ app.logger.warning(f'BG_MONITOR_LAST_COMPLETED for {queue_name} not found ')
err_msgs = []
if limited_sec is not None and last_completed_date:
diff --git a/portality/bll/services/journal.py b/portality/bll/services/journal.py
index f9b6eefbab..d27956d700 100644
--- a/portality/bll/services/journal.py
+++ b/portality/bll/services/journal.py
@@ -8,10 +8,11 @@
from portality import lock
from portality.bll.doaj import DOAJ
from portality.lib.dates import FMT_DATETIME_SHORT
-from portality.store import StoreFactory, prune_container
+from portality.store import StoreFactory, prune_container, StoreException
from portality.crosswalks.journal_questions import Journal2QuestionXwalk
+from portality.util import no_op
-from datetime import datetime
+from datetime import datetime, timedelta
import re, csv, random, string
@@ -115,7 +116,7 @@ def journal(self, journal_id, lock_journal=False, lock_account=None, lock_timeou
return journal, the_lock
- def csv(self, prune=True):
+ def csv(self, prune=True, logger=None):
"""
Generate the Journal CSV
@@ -127,39 +128,55 @@ def csv(self, prune=True):
"""
# first validate the incoming arguments to ensure that we've got the right thing
argvalidate("csv", [
- {"arg": prune, "allow_none" : False, "arg_name" : "prune"}
+ {"arg": prune, "allow_none" : False, "arg_name" : "prune"},
+ {"arg": logger, "allow_none": True, "arg_name": "logger"}
], exceptions.ArgumentException)
+        # None isn't callable, so substitute a no-op logger
+ if logger is None:
+ logger = no_op
+
# ~~->FileStoreTemp:Feature~~
filename = 'journalcsv__doaj_' + dates.now_str(FMT_DATETIME_SHORT) + '_utf8.csv'
container_id = app.config.get("STORE_CACHE_CONTAINER")
tmpStore = StoreFactory.tmp()
- out = tmpStore.path(container_id, filename, create_container=True, must_exist=False)
+ try:
+ out = tmpStore.path(container_id, filename, create_container=True, must_exist=False)
+ logger("Temporary CSV will be written to {x}".format(x=out))
+ except StoreException as e:
+ logger("Could not create temporary CSV file: {x}".format(x=e))
+ raise e
with open(out, 'w', encoding='utf-8') as csvfile:
- self._make_journals_csv(csvfile)
+ self._make_journals_csv(csvfile, logger=logger)
+ logger("Wrote CSV to output file {x}".format(x=out))
# ~~->FileStore:Feature~~
mainStore = StoreFactory.get("cache")
try:
mainStore.store(container_id, filename, source_path=out)
url = mainStore.url(container_id, filename)
+ logger("Stored CSV in main cache store at {x}".format(x=url))
finally:
tmpStore.delete_file(container_id, filename) # don't delete the container, just in case someone else is writing to it
+ logger("Deleted file from tmp store")
action_register = []
if prune:
+ logger("Pruning old CSVs from store")
def sort(filelist):
rx = "journalcsv__doaj_(.+?)_utf8.csv"
return sorted(filelist, key=lambda x: datetime.strptime(re.match(rx, x).groups(1)[0], FMT_DATETIME_SHORT), reverse=True)
def _filter(f_name):
return f_name.startswith("journalcsv__")
- action_register = prune_container(mainStore, container_id, sort, filter=_filter, keep=2)
+ action_register = prune_container(mainStore, container_id, sort, filter=_filter, keep=2, logger=logger)
+ logger("Pruned old CSVs from store")
# update the ES record to point to the new file
# ~~-> Cache:Model~~
models.Cache.cache_csv(url)
+ logger("Stored CSV URL in ES Cache")
return url, action_register
def admin_csv(self, file_path, account_sub_length=8, obscure_accounts=True, add_sensitive_account_info=False):
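The logger handling above follows the usual optional-callback idiom: accept logger=None, then swap in a do-nothing callable so the body can log unconditionally. A minimal sketch, assuming a no_op that accepts and ignores any arguments:

```python
def no_op(*args, **kwargs):
    """Stand-in for an absent logger: accept anything, do nothing."""

def export_csv(logger=None):
    logger = logger if logger is not None else no_op
    logger("starting export")   # safe whether or not a logger was given
    # ... do the work ...
    logger("export complete")
```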
@@ -207,11 +224,12 @@ def acc_email(j):
self._make_journals_csv(f, extra_cols)
@staticmethod
- def _make_journals_csv(file_object, additional_columns=None):
+ def _make_journals_csv(file_object, additional_columns=None, logger=None):
"""
Make a CSV file of information for all journals.
:param file_object: a utf8 encoded file object.
"""
+ logger = logger if logger is not None else lambda x: x
YES_NO = {True: 'Yes', False: 'No', None: '', '': ''}
def _get_doaj_meta_kvs(journal):
@@ -241,38 +259,64 @@ def _get_article_kvs(journal):
return kvs
# ~~!JournalCSV:Feature->Journal:Model~~
- cols = {}
- for j in models.Journal.all_in_doaj(page_size=1000): #Fixme: limited by ES, this may not be sufficient
+ csvwriter = csv.writer(file_object)
+ first = True
+ for j in models.Journal.all_in_doaj(page_size=100):
+ export_start = datetime.utcnow()
+ logger("Exporting journal {x}".format(x=j.id))
+
+ time_log = []
bj = j.bibjson()
issn = bj.get_one_identifier(idtype=bj.P_ISSN)
if issn is None:
issn = bj.get_one_identifier(idtype=bj.E_ISSN)
+ time_log.append("{x} - got issn".format(x=datetime.utcnow()))
+
if issn is None:
continue
# ~~!JournalCSV:Feature->JournalQuestions:Crosswalk~~
kvs = Journal2QuestionXwalk.journal2question(j)
+ time_log.append("{x} - crosswalked questions".format(x=datetime.utcnow()))
meta_kvs = _get_doaj_meta_kvs(j)
+ time_log.append("{x} - got meta kvs".format(x=datetime.utcnow()))
article_kvs = _get_article_kvs(j)
+ time_log.append("{x} - got article kvs".format(x=datetime.utcnow()))
additionals = []
if additional_columns is not None:
for col in additional_columns:
additionals += col(j)
- cols[issn] = kvs + meta_kvs + article_kvs + additionals
+ time_log.append("{x} - got additionals".format(x=datetime.utcnow()))
+ row = kvs + meta_kvs + article_kvs + additionals
# Get the toc URL separately from the meta kvs because it needs to be inserted earlier in the CSV
# ~~-> ToC:WebRoute~~
toc_kv = _get_doaj_toc_kv(j)
- cols[issn].insert(2, toc_kv)
-
- issns = cols.keys()
+ row.insert(2, toc_kv)
+ time_log.append("{x} - got toc kvs".format(x=datetime.utcnow()))
- csvwriter = csv.writer(file_object)
- qs = None
- for i in sorted(issns):
- if qs is None:
- qs = [q for q, _ in cols[i]]
+ if first is True:
+ qs = [q for q, _ in row]
csvwriter.writerow(qs)
- vs = [v for _, v in cols[i]]
+ first = False
+
+ vs = [v for _, v in row]
csvwriter.writerow(vs)
+ time_log.append("{x} - written row to csv".format(x=datetime.utcnow()))
+
+ export_end = datetime.utcnow()
+ if export_end - export_start > timedelta(seconds=10):
+ for l in time_log:
+ logger(l)
+
+ logger("All journals exported and CSV written")
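The rewrite above switches _make_journals_csv from accumulate-then-sort (the old cols dict keyed by ISSN) to streaming: the header is derived from the first row, each row is written as soon as it is built, and per-step timings are only flushed to the logger when a journal takes more than ten seconds. A reduced sketch of that shape:

```python
import csv
from datetime import datetime, timedelta

def stream_rows(file_object, rows, logger=print, slow=timedelta(seconds=10)):
    """rows yields one list of (question, value) pairs per journal."""
    writer = csv.writer(file_object)
    first = True
    for row in rows:
        started = datetime.utcnow()
        if first:
            writer.writerow([q for q, _ in row])   # header from first row
            first = False
        writer.writerow([v for _, v in row])
        elapsed = datetime.utcnow() - started
        if elapsed > slow:                          # only report slow rows
            logger("slow export: {}".format(elapsed))
```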
diff --git a/portality/core.py b/portality/core.py
index 6d2d29c332..f9d9f07318 100644
--- a/portality/core.py
+++ b/portality/core.py
@@ -190,11 +190,11 @@ def initialise_index(app, conn, only_mappings=None):
:return:
"""
if not app.config['INITIALISE_INDEX']:
- app.logger.warn('INITIALISE_INDEX config var is not True, initialise_index command cannot run')
+ app.logger.warning('INITIALISE_INDEX config var is not True, initialise_index command cannot run')
return
if app.config.get("READ_ONLY_MODE", False) and app.config.get("SCRIPTS_READ_ONLY_MODE", False):
- app.logger.warn("System is in READ-ONLY mode, initialise_index command cannot run")
+ app.logger.warning("System is in READ-ONLY mode, initialise_index command cannot run")
return
# get the app mappings
diff --git a/portality/dao.py b/portality/dao.py
index 9d7d4755b2..bfbe95b0cf 100644
--- a/portality/dao.py
+++ b/portality/dao.py
@@ -137,7 +137,7 @@ def save(self, retries=0, back_off_factor=1, differentiate=False, blocking=False
:return:
"""
if app.config.get("READ_ONLY_MODE", False) and app.config.get("SCRIPTS_READ_ONLY_MODE", False):
- app.logger.warn("System is in READ-ONLY mode, save command cannot run")
+ app.logger.warning("System is in READ-ONLY mode, save command cannot run")
return
if retries > app.config.get("ES_RETRY_HARD_LIMIT", 1000): # an arbitrary large number
@@ -221,7 +221,7 @@ def save(self, retries=0, back_off_factor=1, differentiate=False, blocking=False
def delete(self):
if app.config.get("READ_ONLY_MODE", False) and app.config.get("SCRIPTS_READ_ONLY_MODE", False):
- app.logger.warn("System is in READ-ONLY mode, delete command cannot run")
+ app.logger.warning("System is in READ-ONLY mode, delete command cannot run")
return
# r = requests.delete(self.target() + self.id)
@@ -314,7 +314,7 @@ def bulk(cls, documents: List[dict], idkey='id', refresh=False, action='index',
"""
# ~~->ReadOnlyMode:Feature~~
if app.config.get("READ_ONLY_MODE", False) and app.config.get("SCRIPTS_READ_ONLY_MODE", False):
- app.logger.warn("System is in READ-ONLY mode, bulk command cannot run")
+ app.logger.warning("System is in READ-ONLY mode, bulk command cannot run")
return
if action not in ['index', 'update', 'delete']:
@@ -364,7 +364,7 @@ def refresh(cls):
:return:
"""
if app.config.get("READ_ONLY_MODE", False) and app.config.get("SCRIPTS_READ_ONLY_MODE", False):
- app.logger.warn("System is in READ-ONLY mode, refresh command cannot run")
+ app.logger.warning("System is in READ-ONLY mode, refresh command cannot run")
return
# r = requests.post(cls.target() + '_refresh', headers=CONTENT_TYPE_JSON)
@@ -450,7 +450,7 @@ def send_query(cls, qobj, retry=50, **kwargs):
@classmethod
def remove_by_id(cls, id):
if app.config.get("READ_ONLY_MODE", False) and app.config.get("SCRIPTS_READ_ONLY_MODE", False):
- app.logger.warn("System is in READ-ONLY mode, delete_by_id command cannot run")
+ app.logger.warning("System is in READ-ONLY mode, delete_by_id command cannot run")
return
# r = requests.delete(cls.target() + id)
@@ -462,7 +462,7 @@ def remove_by_id(cls, id):
@classmethod
def delete_by_query(cls, query):
if app.config.get("READ_ONLY_MODE", False) and app.config.get("SCRIPTS_READ_ONLY_MODE", False):
- app.logger.warn("System is in READ-ONLY mode, delete_by_query command cannot run")
+ app.logger.warning("System is in READ-ONLY mode, delete_by_query command cannot run")
return
#r = requests.delete(cls.target() + "_query", data=json.dumps(query))
@@ -473,7 +473,7 @@ def delete_by_query(cls, query):
@classmethod
def destroy_index(cls):
if app.config.get("READ_ONLY_MODE", False) and app.config.get("SCRIPTS_READ_ONLY_MODE", False):
- app.logger.warn("System is in READ-ONLY mode, destroy_index command cannot run")
+ app.logger.warning("System is in READ-ONLY mode, destroy_index command cannot run")
return
# if app.config['ELASTIC_SEARCH_INDEX_PER_TYPE']:
diff --git a/portality/forms/application_forms.py b/portality/forms/application_forms.py
index 0065b765fc..e66a8e2ef2 100644
--- a/portality/forms/application_forms.py
+++ b/portality/forms/application_forms.py
@@ -167,11 +167,25 @@ class FieldDefinitions:
"full_contents" # ~~^->FullContents:FormWidget~~
],
"contexts": {
+ "admin": {
+ "widgets": [
+ "trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
+ "click_to_copy", # ~~^-> ClickToCopy:FormWidget~~
+ ]
+ },
"editor": {
- "disabled": True
+ "disabled": True,
+ "widgets": [
+ "trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
+ "click_to_copy", # ~~^-> ClickToCopy:FormWidget~~
+ ]
},
"associate_editor": {
- "disabled": True
+ "disabled": True,
+ "widgets": [
+ "trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
+ "click_to_copy", # ~~^-> ClickToCopy:FormWidget~~
+ ]
},
"update_request": {
"disabled": True
@@ -198,6 +212,24 @@ class FieldDefinitions:
"contexts": {
"update_request": {
"disabled": True
+ },
+ "admin": {
+ "widgets": [
+ "trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
+ "click_to_copy", # ~~^-> ClickToCopy:FormWidget~~
+ ]
+ },
+ "associate_editor": {
+ "widgets": [
+ "trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
+ "click_to_copy", # ~~^-> ClickToCopy:FormWidget~~
+ ]
+ },
+ "editor": {
+ "widgets": [
+ "trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
+ "click_to_copy", # ~~^-> ClickToCopy:FormWidget~~
+ ]
}
}
}
@@ -448,7 +480,7 @@ class FieldDefinitions:
],
"widgets": [
"trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
- {"autocomplete": {"type" : "journal", "field": "bibjson.publisher.name.exact"}},
+ {"autocomplete": {"type" : "journal", "field": "bibjson.publisher.name.exact"}}, # ~~^-> Autocomplete:FormWidget~~
"full_contents" # ~~^->FullContents:FormWidget~~
],
"help": {
@@ -457,6 +489,27 @@ class FieldDefinitions:
"contexts" : {
"bulk_edit" : {
"validate" : []
+ },
+ "admin": {
+ "widgets": [
+ "trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
+ {"autocomplete": {"type": "journal", "field": "bibjson.publisher.name.exact"}}, # ~~^-> Autocomplete:FormWidget~~
+ "click_to_copy", # ~~^-> ClickToCopy:FormWidget~~
+ ]
+ },
+ "associate_editor": {
+ "widgets": [
+ "trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
+ {"autocomplete": {"type": "journal", "field": "bibjson.publisher.name.exact"}}, # ~~^-> Autocomplete:FormWidget~~
+ "click_to_copy", # ~~^-> ClickToCopy:FormWidget~~
+ ]
+ },
+ "editor": {
+ "widgets": [
+ "trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
+ {"autocomplete": {"type": "journal", "field": "bibjson.publisher.name.exact"}}, # ~~^-> Autocomplete:FormWidget~~
+ "click_to_copy", # ~~^-> ClickToCopy:FormWidget~~
+ ]
}
}
}
@@ -507,9 +560,35 @@ class FieldDefinitions:
},
"widgets": [
"trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
- {"autocomplete": {"type" : "journal", "field": "bibjson.institution.name.exact"}},
+ {"autocomplete": {"type" : "journal", "field": "bibjson.institution.name.exact"}}, # ~~^-> Autocomplete:FormWidget~~
"full_contents" # ~~^->FullContents:FormWidget~~
- ]
+ ],
+ "contexts": {
+ "admin": {
+ "widgets": [
+ "trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
+ {"autocomplete": {"type": "journal", "field": "bibjson.institution.name.exact"}},
+ # ~~^-> Autocomplete:FormWidget~~
+ "click_to_copy", # ~~^-> ClickToCopy:FormWidget~~
+ ]
+ },
+ "associate_editor": {
+ "widgets": [
+ "trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
+ {"autocomplete": {"type": "journal", "field": "bibjson.institution.name.exact"}},
+ # ~~^-> Autocomplete:FormWidget~~
+ "click_to_copy", # ~~^-> ClickToCopy:FormWidget~~
+ ]
+ },
+ "editor": {
+ "widgets": [
+ "trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
+ {"autocomplete": {"type": "journal", "field": "bibjson.institution.name.exact"}},
+ # ~~^-> Autocomplete:FormWidget~~
+ "click_to_copy", # ~~^-> ClickToCopy:FormWidget~~
+ ]
+ }
+ }
}
# ~~->$ InstitutionCountry:FormField~~
@@ -770,7 +849,7 @@ class FieldDefinitions:
}
],
"widgets" : [
- "trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
+ "trim_whitespace" # ~~^-> TrimWhitespace:FormWidget~~
],
"asynchronous_warning": [
{"warn_on_value": {"value": "None"}}
@@ -1376,7 +1455,7 @@ class FieldDefinitions:
],
"widgets": [
"trim_whitespace", # ~~^-> TrimWhitespace:FormWidget~~
- "clickable_url" # ~~^-> ClickableURL:FormWidget~~
+ "clickable_url", # ~~^-> ClickableURL:FormWidget~~
],
"contexts" : {
"public" : {
@@ -1581,7 +1660,7 @@ class FieldDefinitions:
"owner_exists"
],
"widgets": [
- {"autocomplete": {"type" : "account", "field": "id", "include" : False}},
+ {"autocomplete": {"type" : "account", "field": "id", "include" : False}}, # ~~^-> Autocomplete:FormWidget~~
"clickable_owner"
],
"contexts" : {
@@ -1639,7 +1718,7 @@ class FieldDefinitions:
"label": "Group",
"input": "text",
"widgets": [
- {"autocomplete": {"type" : "editor_group", "field": "name", "include" : False}}
+ {"autocomplete": {"type" : "editor_group", "field": "name", "include" : False}} # ~~^-> Autocomplete:FormWidget~~
],
"contexts" : {
"editor" : {
@@ -1647,7 +1726,7 @@ class FieldDefinitions:
},
"admin" : {
"widgets" : [
- {"autocomplete": {"type": "editor_group", "field": "name", "include" : False}},
+ {"autocomplete": {"type": "editor_group", "field": "name", "include" : False}}, # ~~^-> Autocomplete:FormWidget~~
{"load_editors" : {"field" : "editor"}}
]
}
@@ -2936,6 +3015,7 @@ def wtforms(field, settings):
JAVASCRIPT_FUNCTIONS = {
"clickable_url": "formulaic.widgets.newClickableUrl", # ~~-> ClickableURL:FormWidget~~
+ "click_to_copy": "formulaic.widgets.newClickToCopy", # ~~-> ClickToCopy:FormWidget~~
"clickable_owner": "formulaic.widgets.newClickableOwner", # ~~-> ClickableOwner:FormWidget~~
"select": "formulaic.widgets.newSelect", # ~~-> SelectBox:FormWidget~~
"taglist": "formulaic.widgets.newTagList", # ~~-> TagList:FormWidget~~
diff --git a/portality/forms/application_processors.py b/portality/forms/application_processors.py
index 13a294d14d..1cd426c1f6 100644
--- a/portality/forms/application_processors.py
+++ b/portality/forms/application_processors.py
@@ -198,8 +198,11 @@ def _patch_target_note_id(self):
for note in self.target.notes:
note_date = dates.parse(note['date'])
if not note.get('author_id') and note_date > dates.before_now(60):
- note['author_id'] = current_user.id
-
+ try:
+ note['author_id'] = current_user.id
+ except AttributeError:
+ # Skip if we don't have a current_user
+ pass
class NewApplication(ApplicationProcessor):
@@ -307,7 +310,6 @@ def patch_target(self):
if (self.target.owner is None or self.target.owner == "") and (self.source.owner is not None):
self.target.set_owner(self.source.owner)
-
def finalise(self, account, save_target=True, email_alert=True):
"""
account is the administrator account carrying out the action
@@ -326,7 +328,6 @@ def finalise(self, account, save_target=True, email_alert=True):
elif not j.is_in_doaj():
raise Exception(Messages.EXCEPTION_EDITING_WITHDRAWN_JOURNAL)
-
# if we are allowed to finalise, kick this up to the superclass
super(AdminApplication, self).finalise()
@@ -813,7 +814,6 @@ def patch_target(self):
if (self.target.owner is None or self.target.owner == "") and (self.source.owner is not None):
self.target.set_owner(self.source.owner)
-
def finalise(self):
# FIXME: this first one, we ought to deal with outside the form context, but for the time being this
# can be carried over from the old implementation
diff --git a/portality/lib/plausible.py b/portality/lib/plausible.py
index 2aa602d986..90b1b8f46b 100644
--- a/portality/lib/plausible.py
+++ b/portality/lib/plausible.py
@@ -62,7 +62,7 @@ def send_event(goal: str, on_completed=None, **props_kwargs):
def _send():
resp = requests.post(plausible_api_url, json=payload, headers=headers)
if resp.status_code >= 300:
- logger.warning(f'send plausible event api fail. [{resp.status_code}][{resp.text}]')
+        logger.warning(f'Sending Plausible event failed. snd: [{resp.url}] [{headers}] [{payload}] rcv: [{resp.status_code}] [{resp.text}]')
if on_completed:
on_completed(resp)
diff --git a/portality/models/background.py b/portality/models/background.py
index ac3d3bfc65..604eccc95d 100644
--- a/portality/models/background.py
+++ b/portality/models/background.py
@@ -152,13 +152,16 @@ def pretty_audit(self):
class StdOutBackgroundJob(BackgroundJob):
- def __init__(self, inner):
+ def __init__(self, inner, force_logging=False):
super(StdOutBackgroundJob, self).__init__(**inner.data)
+ self._force_logging = force_logging
def add_audit_message(self, msg, timestamp=None):
super(StdOutBackgroundJob, self).add_audit_message(msg, timestamp)
- if app.config.get("DOAJENV") == 'dev':
- print(msg)
+ if app.config.get("DOAJENV") == 'dev' or self._force_logging:
+ if timestamp is None:
+ timestamp = dates.now_str_with_microseconds()
+ print("[" + timestamp + "] " + msg)
# ~~-> DataObj:Library~~
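Condensed, the behaviour added above: audit messages are echoed to stdout when running in dev or when force_logging is set, each prefixed with a microsecond timestamp so interleaved task output can be ordered. A sketch approximating dates.now_str_with_microseconds with datetime:

```python
from datetime import datetime

class StdOutEcho:
    def __init__(self, force_logging=False, env="production"):
        self._force_logging = force_logging
        self._env = env

    def add_audit_message(self, msg, timestamp=None):
        if self._env == "dev" or self._force_logging:
            if timestamp is None:
                timestamp = datetime.utcnow().isoformat(timespec="microseconds")
            print("[" + timestamp + "] " + msg)
```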
diff --git a/portality/models/preservation.py b/portality/models/preservation.py
index c89c2f3f6f..cdaffb2708 100644
--- a/portality/models/preservation.py
+++ b/portality/models/preservation.py
@@ -99,6 +99,10 @@ def no_files_articles(self, articles_list):
if articles_list is not None and len(articles_list) > 0:
self.data["articles_info"]["no_files_articles"] = ", ".join(articles_list)
+ def uploaded_journals(self, uploaded_journals):
+ if uploaded_journals is not None and len(uploaded_journals) > 0:
+ self.data["articles_info"]["uploaded_journals"] = ", ".join(uploaded_journals)
+
@classmethod
def by_owner(cls, owner, size=10):
q = OwnerFileQuery(owner)
diff --git a/portality/models/v2/journal.py b/portality/models/v2/journal.py
index ac1ce42585..efa6aa53e4 100644
--- a/portality/models/v2/journal.py
+++ b/portality/models/v2/journal.py
@@ -70,6 +70,22 @@ def find_by_issn(cls, issns, in_doaj=None, max=10):
records = [cls(**r.get("_source")) for r in result.get("hits", {}).get("hits", [])]
return records
+ @classmethod
+ def find_by_issn_exact(cls, issns, in_doaj=None, max=2):
+ """
+        Finds the journal matching the given ISSNs exactly - barring data problems, there should be exactly one
+ """
+ if not isinstance(issns, list):
+ issns = [issns]
+ if len(issns) > 2:
+ return []
+ q = JournalQuery()
+ q.find_by_issn_exact(issns, in_doaj=in_doaj, max=max)
+ result = cls.query(q=q.query)
+ # create an array of objects, using cls rather than Journal, which means subclasses can use it too
+ records = [cls(**r.get("_source")) for r in result.get("hits", {}).get("hits", [])]
+ return records
+
@classmethod
def issns_by_owner(cls, owner, in_doaj=None):
q = IssnQuery(owner, in_doaj=in_doaj)
@@ -922,6 +938,16 @@ class JournalQuery(object):
}
}
+ must_query = {
+ "track_total_hits": True,
+ "query": {
+ "bool": {
+ "must": [
+ ]
+ }
+ }
+ }
+
all_doaj = {
"track_total_hits": True,
"query": {
@@ -947,6 +973,14 @@ def find_by_issn(self, issns, in_doaj=None, max=10):
self.query["query"]["bool"]["must"].append({"term": {"admin.in_doaj": in_doaj}})
self.query["size"] = max
+ def find_by_issn_exact(self, issns, in_doaj=None, max=10):
+ self.query = deepcopy(self.must_query)
+ for issn in issns:
+ self.query["query"]["bool"]["must"].append({"term": {"index.issn.exact": issn}})
+ if in_doaj is not None:
+ self.query["query"]["bool"]["must"].append({"term": {"admin.in_doaj": in_doaj}})
+ self.query["size"] = max
+
def all_in_doaj(self):
q = deepcopy(self.all_doaj)
if self.minified:
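For reference, the query body find_by_issn_exact assembles: one term clause per ISSN under bool.must, so a journal only matches if it carries every supplied ISSN (presumably the stricter counterpart to find_by_issn). A standalone approximation:

```python
from copy import deepcopy

MUST_QUERY = {"track_total_hits": True, "query": {"bool": {"must": []}}}

def exact_issn_query(issns, in_doaj=None, size=2):
    q = deepcopy(MUST_QUERY)
    for issn in issns:  # every ISSN must be present on the journal
        q["query"]["bool"]["must"].append({"term": {"index.issn.exact": issn}})
    if in_doaj is not None:
        q["query"]["bool"]["must"].append({"term": {"admin.in_doaj": in_doaj}})
    q["size"] = size
    return q

# e.g. exact_issn_query(["1234-5678", "8765-4321"], in_doaj=True)
```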
diff --git a/portality/scripts/230609_find_articles_with_invalid_issns.py b/portality/scripts/230609_find_articles_with_invalid_issns.py
new file mode 100644
index 0000000000..8a02d851cc
--- /dev/null
+++ b/portality/scripts/230609_find_articles_with_invalid_issns.py
@@ -0,0 +1,57 @@
+from portality import models
+from portality.bll.services import article as articlesvc
+from portality.bll import exceptions
+import csv
+
+IN_DOAJ = {
+ "query": {
+ "bool": {
+ "must": [
+ {"term": {"admin.in_doaj": True}}
+ ]
+ }
+ }
+}
+
+
+if __name__ == "__main__":
+
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-o", "--out", help="output file path", required=True)
+ args = parser.parse_args()
+
+ with open(args.out, "w", encoding="utf-8") as f:
+ writer = csv.writer(f)
+ writer.writerow(["ID", "PISSN", "EISSN", "Journals found with article's PISSN", "In doaj?", "Journals found with article's EISSN", "In doaj?", "Error"])
+
+ for a in models.Article.iterate(q=IN_DOAJ, page_size=100, keepalive='5m'):
+ article = models.Article(_source=a)
+ bibjson = article.bibjson()
+ try:
+ articlesvc.ArticleService._validate_issns(bibjson)
+ articlesvc.ArticleService.match_journal_with_validation(bibjson)
+ except exceptions.ArticleNotAcceptable as e:
+                article_id = article.id
+                pissn = bibjson.get_identifiers("pissn")
+                eissn = bibjson.get_identifiers("eissn")
+                j_p = [j["id"] for j in models.Journal.find_by_issn(pissn)]
+                j_p_in_doaj = []
+                for j in j_p:
+                    jobj = models.Journal.pull(j)
+                    if jobj:
+                        j_p_in_doaj.append(jobj.is_in_doaj())
+                    else:
+                        j_p_in_doaj.append("n/a")
+                j_e = [j["id"] for j in models.Journal.find_by_issn(eissn)]
+                j_e_in_doaj = []
+                for j in j_e:
+                    jobj = models.Journal.pull(j)
+                    if jobj:
+                        j_e_in_doaj.append(jobj.is_in_doaj())
+                    else:
+                        j_e_in_doaj.append("n/a")
+                writer.writerow([article_id, pissn, eissn, j_p, j_p_in_doaj, j_e, j_e_in_doaj, str(e)])
diff --git a/portality/scripts/journalcsv.py b/portality/scripts/journalcsv.py
index 7c00cbdf41..dedfb51c9b 100644
--- a/portality/scripts/journalcsv.py
+++ b/portality/scripts/journalcsv.py
@@ -9,10 +9,17 @@
exit()
user = app.config.get("SYSTEM_USERNAME")
+ print("Running journal CSV export for user {}".format(user))
+
job = journal_csv.JournalCSVBackgroundTask.prepare(user)
- job = StdOutBackgroundJob(job)
+ job = StdOutBackgroundJob(job, force_logging=True)
+ print("Background Job prepared with id {}".format(job.id))
+
task = journal_csv.JournalCSVBackgroundTask(job)
+ print("Background task created")
+
BackgroundApi.execute(task)
+ print("Finished journal CSV export for user {}".format(user))
diff --git a/portality/scripts/journals_update_via_csv.py b/portality/scripts/journals_update_via_csv.py
index 298b7c817b..c696068a85 100644
--- a/portality/scripts/journals_update_via_csv.py
+++ b/portality/scripts/journals_update_via_csv.py
@@ -82,6 +82,7 @@
reader = csv.DictReader(g, fieldnames=header_row)
# verify header row with current CSV headers, report errors
+ # TODO: Include 'Owner' field - but we should probably base this process off the AdminCSV too.
expected_headers = JournalFixtureFactory.csv_headers()
# Always perform a match check on supplied headers, not counting order
@@ -155,6 +156,14 @@
if len(updates) > 0:
[print(upd) for upd in updates]
+ # Check we have the expected owner (if supplied) before proceeding to create an update request
+ own = row.get('Owner')
+ if own is not None:
+ if own.strip().lower() != j.owner.strip().lower():
+                    print('ABORTING update - supplied owner {0} does not match journal owner {1}.'.format(own, j.owner))
+ writer.writerow([j.id, ' | '.join(updates), 'COULD NOT UPDATE - Owner mismatch. Expected {0} Got {1}'.format(own, j.owner)])
+ continue
+
# Create an update request for this journal
update_req = None
jlock = None
@@ -204,7 +213,7 @@
# Add note to UR if supplied
if note:
- fc.target.add_note(note)
+ fc.target.add_note(note, author_id=sys_acc.id)
if not args.manual_review:
# This is the update request, in 'update request' state
diff --git a/portality/scripts/manage_background_jobs.py b/portality/scripts/manage_background_jobs.py
index fbfa648f8b..4faa18193d 100644
--- a/portality/scripts/manage_background_jobs.py
+++ b/portality/scripts/manage_background_jobs.py
@@ -22,45 +22,64 @@
from portality.lib import dates
from portality.lib.dates import DEFAULT_TIMESTAMP_VAL
+from portality.tasks.anon_export import AnonExportBackgroundTask
+from portality.tasks.article_bulk_delete import ArticleBulkDeleteBackgroundTask
+from portality.tasks.article_cleanup_sync import ArticleCleanupSyncBackgroundTask
+from portality.tasks.article_duplicate_report import ArticleDuplicateReportBackgroundTask
+from portality.tasks.async_workflow_notifications import AsyncWorkflowBackgroundTask
+from portality.tasks.check_latest_es_backup import CheckLatestESBackupBackgroundTask
+# from portality.tasks.find_discontinued_soon import FindDiscontinuedSoonBackgroundTask
+from portality.tasks.harvester import HarvesterBackgroundTask
from portality.tasks.ingestarticles import IngestArticlesBackgroundTask
-from portality.tasks.preservation import PreservationBackgroundTask
-from portality.tasks.suggestion_bulk_edit import SuggestionBulkEditBackgroundTask
-from portality.tasks.sitemap import SitemapBackgroundTask
-from portality.tasks.read_news import ReadNewsBackgroundTask
+from portality.tasks.journal_bulk_delete import JournalBulkDeleteBackgroundTask
+from portality.tasks.journal_bulk_edit import JournalBulkEditBackgroundTask
from portality.tasks.journal_csv import JournalCSVBackgroundTask
-from portality.tasks.article_cleanup_sync import ArticleCleanupSyncBackgroundTask
from portality.tasks.journal_in_out_doaj import SetInDOAJBackgroundTask
-from portality.tasks.check_latest_es_backup import CheckLatestESBackupBackgroundTask
+from portality.tasks.preservation import PreservationBackgroundTask
from portality.tasks.prune_es_backups import PruneESBackupsBackgroundTask
from portality.tasks.public_data_dump import PublicDataDumpBackgroundTask
-from portality.tasks.harvester import HarvesterBackgroundTask
-from portality.tasks.anon_export import AnonExportBackgroundTask
+from portality.tasks.read_news import ReadNewsBackgroundTask
+from portality.tasks.reporting import ReportingBackgroundTask
+from portality.tasks.sitemap import SitemapBackgroundTask
+from portality.tasks.suggestion_bulk_edit import SuggestionBulkEditBackgroundTask
+
+from portality.background import BackgroundApi
# dict of {task_name: task_class} so we can interact with the jobs
HANDLERS = {
- PreservationBackgroundTask.__action__:PreservationBackgroundTask,
+ AnonExportBackgroundTask.__action__: AnonExportBackgroundTask,
+ ArticleBulkDeleteBackgroundTask.__action__: ArticleBulkDeleteBackgroundTask,
+ ArticleCleanupSyncBackgroundTask.__action__: ArticleCleanupSyncBackgroundTask,
+ ArticleDuplicateReportBackgroundTask.__action__: ArticleDuplicateReportBackgroundTask,
+ AsyncWorkflowBackgroundTask.__action__: AsyncWorkflowBackgroundTask,
+ CheckLatestESBackupBackgroundTask.__action__: CheckLatestESBackupBackgroundTask,
+ # FindDiscontinuedSoonBackgroundTask.__action__: FindDiscontinuedSoonBackgroundTask,
+ HarvesterBackgroundTask.__action__: HarvesterBackgroundTask,
IngestArticlesBackgroundTask.__action__: IngestArticlesBackgroundTask,
- SuggestionBulkEditBackgroundTask.__action__: SuggestionBulkEditBackgroundTask,
- SitemapBackgroundTask.__action__: SitemapBackgroundTask,
- ReadNewsBackgroundTask.__action__: ReadNewsBackgroundTask,
+ JournalBulkDeleteBackgroundTask.__action__: JournalBulkDeleteBackgroundTask,
+ JournalBulkEditBackgroundTask.__action__: JournalBulkEditBackgroundTask,
JournalCSVBackgroundTask.__action__: JournalCSVBackgroundTask,
- ArticleCleanupSyncBackgroundTask.__action__: ArticleCleanupSyncBackgroundTask,
SetInDOAJBackgroundTask.__action__: SetInDOAJBackgroundTask,
- CheckLatestESBackupBackgroundTask.__action__: CheckLatestESBackupBackgroundTask,
+ PreservationBackgroundTask.__action__:PreservationBackgroundTask,
PruneESBackupsBackgroundTask.__action__: PruneESBackupsBackgroundTask,
PublicDataDumpBackgroundTask.__action__: PublicDataDumpBackgroundTask,
- HarvesterBackgroundTask.__action__: HarvesterBackgroundTask,
- AnonExportBackgroundTask.__action__: AnonExportBackgroundTask,
+ ReadNewsBackgroundTask.__action__: ReadNewsBackgroundTask,
+ ReportingBackgroundTask.__action__: ReportingBackgroundTask,
+ SitemapBackgroundTask.__action__: SitemapBackgroundTask,
+ SuggestionBulkEditBackgroundTask.__action__: SuggestionBulkEditBackgroundTask
}
-def manage_jobs(verb, action, status, from_date, to_date):
+def manage_jobs(verb, action, status, from_date, to_date, prompt=True):
q = JobsQuery(action, status, from_date, to_date)
jobs = models.BackgroundJob.q2obj(q=q.query())
print('You are about to {verb} {count} job(s)'.format(verb=verb, count=len(jobs)))
- doit = input('Proceed? [y\\N] ')
+
+ doit = "y"
+ if prompt:
+ doit = input('Proceed? [y\\N] ')
if doit.lower() == 'y':
print('Please wait...')
@@ -70,7 +89,7 @@ def manage_jobs(verb, action, status, from_date, to_date):
continue
job.add_audit_message("Job {pp} from job management script.".format(
- pp={'requeue': 'requeued', 'cancel': 'cancelled'}[verb]))
+ pp={'requeue': 'requeued', 'cancel': 'cancelled', "process": "processed"}[verb]))
if verb == 'requeue': # Re-queue and execute immediately
job.queue()
@@ -78,18 +97,24 @@ def manage_jobs(verb, action, status, from_date, to_date):
elif verb == 'cancel': # Just apply cancelled status
job.cancel()
job.save()
+ elif verb == 'process':
+ task = HANDLERS[job.action](job) # Just execute immediately without going through huey
+ BackgroundApi.execute(task)
print('done.')
else:
print('No action.')
-def requeue_jobs(action, status, from_date, to_date):
- manage_jobs('requeue', action, status, from_date, to_date)
+def requeue_jobs(action, status, from_date, to_date, prompt=True):
+ manage_jobs('requeue', action, status, from_date, to_date, prompt=prompt)
+
+def cancel_jobs(action, status, from_date, to_date, prompt=True):
+ manage_jobs('cancel', action, status, from_date, to_date, prompt=prompt)
-def cancel_jobs(action, status, from_date, to_date):
- manage_jobs('cancel', action, status, from_date, to_date)
+def process_jobs(action, status, from_date, to_date, prompt=True):
+ manage_jobs("process", action, status, from_date, to_date, prompt=prompt)
class JobsQuery(object):
@@ -127,6 +152,8 @@ def query(self):
help='Add these jobs back on the job queue for processing', action='store_true')
parser.add_argument('-c', '--cancel',
help='Cancel these jobs (set their status to "cancelled")', action='store_true')
+ parser.add_argument("-p", "--process",
+ help="Immediately process these jobs on the command line", action="store_true")
parser.add_argument('-s', '--status',
help='Filter for job status. Default is "queued"',
default='queued')
@@ -139,15 +166,18 @@ def query(self):
parser.add_argument('-t', '--to_date',
help='Date to which to look for jobs in the given type and status',
default=dates.now_str())
+ parser.add_argument("-y", "--yes", help="Answer yes to all prompts", action="store_true")
args = parser.parse_args()
if args.requeue and args.cancel:
print('Use only --requeue OR --cancel, not both.')
exit(1)
elif args.requeue:
- requeue_jobs(args.action, args.status, args.from_date, args.to_date)
+ requeue_jobs(args.action, args.status, args.from_date, args.to_date, prompt=False if args.yes else True)
elif args.cancel:
- cancel_jobs(args.action, args.status, args.from_date, args.to_date)
+ cancel_jobs(args.action, args.status, args.from_date, args.to_date, prompt=False if args.yes else True)
+ elif args.process:
+ process_jobs(args.action, args.status, args.from_date, args.to_date, prompt=False if args.yes else True)
else:
- print('You must supply one of --requeue or --cancel to run this script')
+ print('You must supply one of --requeue, --cancel or --process to run this script')
exit(1)
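Reduced to its control flow, the script now supports three mutually exclusive verbs plus a --yes flag that bypasses the confirmation prompt, with 'process' running the handler inline instead of re-queueing. A simplified sketch (job and handler interfaces abbreviated):

```python
# Simplified control-flow sketch; job/handler interfaces are abbreviated
def manage(verb, jobs, handlers, prompt=True):
    if prompt and input('Proceed? [y\\N] ').lower() != 'y':
        print('No action.')
        return
    for job in jobs:
        if verb == 'requeue':
            job.queue()                       # back onto the queue
        elif verb == 'cancel':
            job.cancel()
            job.save()
        elif verb == 'process':
            task = handlers[job.action](job)  # run inline, bypassing huey
            task.run()
    print('done.')
```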
diff --git a/portality/settings.py b/portality/settings.py
index 44fc4ee22f..bd457f29e0 100644
--- a/portality/settings.py
+++ b/portality/settings.py
@@ -9,7 +9,7 @@
# Application Version information
# ~~->API:Feature~~
-DOAJ_VERSION = "6.3.15"
+DOAJ_VERSION = "6.4.5"
API_VERSION = "3.0.1"
######################################
@@ -422,11 +422,14 @@
# Crontab for never running a job - February 31st (use to disable tasks)
CRON_NEVER = {"month": "2", "day": "31", "day_of_week": "*", "hour": "*", "minute": "*"}
+# Additional Logging for scheduled JournalCSV
+EXTRA_JOURNALCSV_LOGGING = False
+
# Crontab schedules must be for unique times to avoid delays due to perceived race conditions
HUEY_SCHEDULE = {
"sitemap": {"month": "*", "day": "*", "day_of_week": "*", "hour": "8", "minute": "0"},
"reporting": {"month": "*", "day": "1", "day_of_week": "*", "hour": "0", "minute": "0"},
- "journal_csv": {"month": "*", "day": "*", "day_of_week": "*", "hour": "*", "minute": "35"},
+ "journal_csv": {"month": "*", "day": "*", "day_of_week": "*", "hour": "*", "minute": "20"},
"read_news": {"month": "*", "day": "*", "day_of_week": "*", "hour": "*", "minute": "30"},
"article_cleanup_sync": {"month": "*", "day": "2", "day_of_week": "*", "hour": "0", "minute": "0"},
"async_workflow_notifications": {"month": "*", "day": "*", "day_of_week": "1", "hour": "5", "minute": "0"},
@@ -946,6 +949,8 @@
# OAI-PMH SETTINGS
# ~~->OAIPMH:Feature~~
+OAI_ADMIN_EMAIL = 'helpdesk+oai@doaj.org'
+
# ~~->OAIAriticleXML:Crosswalk~~
# ~~->OAIJournalXML:Crosswalk~~
OAI_DC_METADATA_FORMAT = {
diff --git a/portality/static/js/doaj.fieldrender.edges.js b/portality/static/js/doaj.fieldrender.edges.js
index 169ce93133..49faf4b543 100644
--- a/portality/static/js/doaj.fieldrender.edges.js
+++ b/portality/static/js/doaj.fieldrender.edges.js
@@ -645,13 +645,13 @@ $.extend(true, doaj, {
toggle = '';
}
var placeholder = 'Search ' + this.component.nodeCount + ' subjects';
- var frag = '
' + this.title + toggle + '
\
-
\
+ var frag = '
\
+
\
\
\
\
{{FILTERS}}
\
-
';
+
';
// substitute in the component parts
frag = frag.replace(/{{FILTERS}}/g, treeFrag);
@@ -1832,10 +1832,10 @@ $.extend(true, doaj, {
if (this.togglable) {
toggle = '';
}
- var frag = '
' + this.component.display + toggle + '
\
-
\
+ var frag = '
\
+
\
{{FILTERS}}
\
-
';
+
';
// substitute in the component parts
frag = frag.replace(/{{FILTERS}}/g, filterFrag + results);
@@ -2083,10 +2083,10 @@ $.extend(true, doaj, {
if (this.togglable) {
toggle = '';
}
- var frag = '
' + this.component.display + toggle + '
\
-
\
+ var frag = '
\
+
\
{{FILTERS}}
\
-
';
+
';
// substitute in the component parts
frag = frag.replace(/{{FILTERS}}/g, filterFrag + results);
diff --git a/portality/static/js/formulaic.js b/portality/static/js/formulaic.js
index 99367a6c91..c970e12c18 100644
--- a/portality/static/js/formulaic.js
+++ b/portality/static/js/formulaic.js
@@ -1195,7 +1195,6 @@ var formulaic = {
this.init();
},
-
newClickableOwner : function(params) {
return edges.instantiate(formulaic.widgets.ClickableOwner, params)
},
@@ -1238,7 +1237,27 @@ var formulaic = {
this.init();
},
+ newClickToCopy : function(params) {
+ return edges.instantiate(formulaic.widgets.ClickToCopy, params)
+ },
+ ClickToCopy : function(params) {
+ this.fieldDef = params.fieldDef;
+ this.init = function() {
+ var elements = $("#click-to-copy--" + this.fieldDef.name);
+ edges.on(elements, "click", this, "copy");
+ };
+ this.copy = function(element) {
+ let form = new doaj.af.BaseApplicationForm()
+ let value = form.determineFieldsValue(this.fieldDef.name)
+ let value_to_copy = form.convertValueToText(value);
+ navigator.clipboard.writeText(value_to_copy)
+ var confirmation = $("#copy-confirmation--" + this.fieldDef.name);
+ confirmation.text("Copied: " + value_to_copy);
+ confirmation.show().delay(3000).fadeOut();
+ };
+ this.init();
+ },
newTrimWhitespace : function(params) {
return edges.instantiate(formulaic.widgets.TrimWhitespace, params)
},
diff --git a/portality/store.py b/portality/store.py
index 90300fb1aa..2d0935ee19 100644
--- a/portality/store.py
+++ b/portality/store.py
@@ -292,7 +292,8 @@ def list_container_ids(self):
return [x for x in os.listdir(self.dir) if os.path.isdir(os.path.join(self.dir, x))]
-def prune_container(storage, container_id, sort, filter=None, keep=1):
+def prune_container(storage, container_id, sort, filter=None, keep=1, logger=None):
+ logger = logger if logger is not None else lambda x: x
action_register = []
filelist = storage.list(container_id)
@@ -316,7 +317,9 @@ def prune_container(storage, container_id, sort, filter=None, keep=1):
#action_register.append("Considering files for retention in the following order: " + ", ".join(filtered_sorted))
remove = filtered_sorted[keep:]
- action_register.append("Removed old files: " + ", ".join(remove))
+ msg = "Removed old files: " + ", ".join(remove)
+ action_register.append(msg)
+ logger(msg)
for fn in remove:
storage.delete_file(container_id, fn)
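The pruning above depends on the caller's sort callback, which orders files newest-first by the timestamp embedded in the filename (see the journalcsv sort in journal.py). A standalone sketch of that keep-N idiom; the date format string here is an assumption standing in for FMT_DATETIME_SHORT:

```python
import re
from datetime import datetime

RX = r"journalcsv__doaj_(.+?)_utf8.csv"
FMT = "%Y-%m-%d_%H%M"   # assumed stand-in for FMT_DATETIME_SHORT

def files_to_prune(filenames, keep=2):
    """Return the filenames to delete, keeping only the `keep` newest."""
    stamped = [f for f in filenames if re.match(RX, f)]
    newest_first = sorted(
        stamped,
        key=lambda f: datetime.strptime(re.match(RX, f).group(1), FMT),
        reverse=True)
    return newest_first[keep:]
```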
diff --git a/portality/tasks/async_workflow_notifications.py b/portality/tasks/async_workflow_notifications.py
index 4bea0b1104..b0236af7ec 100644
--- a/portality/tasks/async_workflow_notifications.py
+++ b/portality/tasks/async_workflow_notifications.py
@@ -333,7 +333,7 @@ def associate_editor_notifications(emails_dict, limit=None):
assoc_email = assoc.email
except AttributeError:
# There isn't an account for that id
- app.logger.warn("No account found for ID {0}".format(assoc_id))
+ app.logger.warning("No account found for ID {0}".format(assoc_id))
continue
text = render_template('email/workflow_reminder_fragments/assoc_ed_age_frag', num_idle=idle, x_days=X_DAYS, num_very_idle=very_idle, y_weeks=Y_WEEKS, url=url)
diff --git a/portality/tasks/harvester_helpers/epmc/client.py b/portality/tasks/harvester_helpers/epmc/client.py
index fb742b0714..2957e9fe62 100644
--- a/portality/tasks/harvester_helpers/epmc/client.py
+++ b/portality/tasks/harvester_helpers/epmc/client.py
@@ -37,9 +37,9 @@ def check_epmc_version(resp_json):
received_ver = resp_json['version']
configured_ver = app.config.get("EPMC_TARGET_VERSION")
if received_ver != configured_ver:
- app.logger.warn("Mismatching EPMC API version; recommend checking for changes. Expected '{0}' Found '{1}'".format(configured_ver, received_ver))
+ app.logger.warning("Mismatching EPMC API version; recommend checking for changes. Expected '{0}' Found '{1}'".format(configured_ver, received_ver))
except KeyError:
- app.logger.warn("Couldn't check EPMC API version; did not find 'version' key in response. Proceed with caution as the EPMC API may have changed.")
+ app.logger.warning("Couldn't check EPMC API version; did not find 'version' key in response. Proceed with caution as the EPMC API may have changed.")
def to_keywords(s):
diff --git a/portality/tasks/helpers/background_helper.py b/portality/tasks/helpers/background_helper.py
index 2790475729..66a15343e8 100644
--- a/portality/tasks/helpers/background_helper.py
+++ b/portality/tasks/helpers/background_helper.py
@@ -26,7 +26,7 @@ def get_queue_id_by_task_queue(task_queue: RedisHuey):
elif task_queue.name == main_queue.name:
return constants.BGJOB_QUEUE_ID_MAIN
else:
- app.logger.warn(f'unknown task_queue[{task_queue}]')
+ app.logger.warning(f'unknown task_queue[{task_queue}]')
return constants.BGJOB_QUEUE_ID_UNKNOWN
@@ -141,7 +141,7 @@ def _load_bgtask_safe(_mi):
return _mi.module_finder.find_spec(_mi.name).loader.load_module(_mi.name)
except RuntimeError as e:
if 'No configuration for scheduled action' in str(e):
- app.logger.warn(f'config for {_mi.name} not found')
+ app.logger.warning(f'config for {_mi.name} not found')
return None
raise e
diff --git a/portality/tasks/ingestarticles.py b/portality/tasks/ingestarticles.py
index de6991ab40..e798f4005d 100644
--- a/portality/tasks/ingestarticles.py
+++ b/portality/tasks/ingestarticles.py
@@ -312,11 +312,16 @@ def _process(self, file_upload: models.FileUpload):
for article in articles:
article.set_upload_id(file_upload.id)
result = articleService.batch_create_articles(articles, account, add_journal_info=True)
- except (IngestException, CrosswalkException) as e:
- job.add_audit_message("IngestException: {msg}. Inner message: {inner}. Stack: {x}"
- .format(msg=e.message, inner=e.inner_message, x=e.trace()))
+ except (IngestException, CrosswalkException, ArticleNotAcceptable) as e:
+ if hasattr(e, 'inner_message'):
+ job.add_audit_message("{exception}: {msg}. Inner message: {inner}. Stack: {x}"
+ .format(exception=e.__class__.__name__, msg=e.message, inner=e.inner_message, x=e.trace()))
+ file_upload.failed(e.message, e.inner_message)
+ else:
+ job.add_audit_message("{exception}: {msg}.".format(exception=e.__class__.__name__, msg=e.message))
+ file_upload.failed(e.message)
+
job.outcome_fail()
- file_upload.failed(e.message, e.inner_message)
result = e.result
try:
file_failed(path)
@@ -324,7 +329,7 @@ def _process(self, file_upload: models.FileUpload):
except:
job.add_audit_message("Error cleaning up file which caused IngestException: {x}"
.format(x=traceback.format_exc()))
- except (DuplicateArticleException, ArticleNotAcceptable) as e:
+ except DuplicateArticleException as e:
job.add_audit_message(str(e))
job.outcome_fail()
file_upload.failed(str(e))
diff --git a/portality/tasks/journal_csv.py b/portality/tasks/journal_csv.py
index e863aeb9c4..153a13735e 100644
--- a/portality/tasks/journal_csv.py
+++ b/portality/tasks/journal_csv.py
@@ -15,12 +15,22 @@ def run(self):
Execute the task as specified by the background_job
:return:
"""
+
+ def logger(msg):
+ self.background_job.add_audit_message(msg)
+
+ _l = logger if app.config.get('EXTRA_JOURNALCSV_LOGGING', False) else None
+
job = self.background_job
journalService = DOAJ.journalService()
- url, action_register = journalService.csv()
- for ar in action_register:
- job.add_audit_message(ar)
+ url, action_register = journalService.csv(logger=_l)
+
+ # Log directly to the task if we don't have extra logging configured
+ if _l is None:
+ for ar in action_register:
+ job.add_audit_message(ar)
+
job.add_audit_message("CSV generated; will be served from {y}".format(y=url))
def cleanup(self):
diff --git a/portality/tasks/preservation.py b/portality/tasks/preservation.py
index 037ba4c1dd..4fc1cf09b9 100644
--- a/portality/tasks/preservation.py
+++ b/portality/tasks/preservation.py
@@ -5,7 +5,6 @@
import shutil
import tarfile
from copy import deepcopy
-from datetime import datetime
from zipfile import ZipFile
import requests
@@ -125,11 +124,15 @@ def __init__(self):
self.__unbagged_articles = []
self.__not_found_articles = []
self.__no_files_articles = []
+ self.__uploaded_journals = []
self.has_errors = False
def add_successful_article(self, article: ArticlePackage):
self.__successful_articles.append(os.path.basename(article.article_dir))
+ def add_uploaded_journal(self, journal_package):
+ self.__uploaded_journals.append(journal_package)
+
def add_unowned_articles(self, article: ArticlePackage):
self.has_errors = True
self.__unowned_articles.append(os.path.basename(article.article_dir))
@@ -167,6 +170,9 @@ def not_found_articles(self):
def no_files_articles(self):
return self.__no_files_articles
+ def uploaded_journals(self):
+ return self.__uploaded_journals
+
def get_count(self):
return len(self.__successful_articles) + \
len(self.__unowned_articles) + \
@@ -242,24 +248,50 @@ def run(self):
job.add_audit_message("Create Package structure")
articles_list = preserv.create_package_structure()
- self.save_articles_list(articles_list, preserve_model)
+
app.logger.debug("Created package structure")
if len(articles_list.successful_articles()) > 0:
- package = PreservationPackage(preserv.preservation_dir, job.user)
- job.add_audit_message("Create preservation package")
- tar_file = package.create_package()
- app.logger.debug(f"Created tar file {tar_file}")
+            # Each subdirectory is a journal, and the directory name is the ISSN of that journal
+ # iterate through the directories and upload each journal as an individual package
+ dirs = [f.name for f in os.scandir(preserv.preservation_dir) if f.is_dir()]
+ upload_failed = False
+ for sub_dir in dirs:
+
+ package = PreservationPackage(preserv.preservation_dir, sub_dir, job.user)
+ job.add_audit_message("Create preservation package for " + sub_dir)
+ tar_file = package.create_package()
+
+ app.logger.debug(f"Created tar file {tar_file}")
+
+ job.add_audit_message("Create shasum for " + sub_dir)
+ sha256 = package.sha256(package.tar_file)
+
+ job.add_audit_message("Upload package " + sub_dir)
+ response = package.upload_package(sha256, package.tar_file)
+                app.logger.debug(f"Uploaded. Response: {response.text}")
+
+ job.add_audit_message("Validate response")
+ self.validate_response(response, tar_file, sha256, preserve_model)
+
+ if preserve_model.status == 'failed':
+ upload_failed = True
+ break
+ else:
+ articles_list.add_uploaded_journal(package.tar_file_name)
- job.add_audit_message("Create shasum")
- sha256 = package.sha256()
+ # Upload the identifier file
+ job.add_audit_message("Create shasum for identifier")
+ sha256 = package.sha256(package.identifier_file)
- job.add_audit_message("Upload package")
- response = package.upload_package(sha256)
- app.logger.debug(f"Uploaded. Response{response.text}")
+ identifier_file_name = os.path.basename(package.identifier_file)
+ job.add_audit_message("Upload identifier file " + identifier_file_name)
+ package.upload_package(sha256, package.identifier_file)
+ articles_list.add_uploaded_journal(identifier_file_name)
+            app.logger.debug("Uploaded identifier file " + identifier_file_name)
- job.add_audit_message("Validate response")
- self.validate_response(response, tar_file, sha256, preserve_model)
+ if not upload_failed:
+ preserve_model.uploaded_to_ia()
# Check if the only few articles are successful
if articles_list.is_partial_success():
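In outline, run() now iterates the journal subdirectories of the preservation directory, packaging and uploading one tarball per journal and abandoning the remainder on the first failed upload; the identifier file is uploaded afterwards. A rough sketch of that loop (helper names invented, not the PreservationPackage API):

```python
import os

# Rough shape of the per-journal upload loop; make_package/upload are
# invented helpers standing in for PreservationPackage methods.
def upload_per_journal(preservation_dir, make_package, upload):
    uploaded, failed = [], False
    for entry in os.scandir(preservation_dir):
        if not entry.is_dir():
            continue
        tar_file, sha256 = make_package(entry.name)
        if not upload(tar_file, sha256):
            failed = True
            break                      # stop at the first failed upload
        uploaded.append(os.path.basename(tar_file))
    return uploaded, failed
```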
@@ -277,6 +309,8 @@ def run(self):
preserve_model.failed(FailedReasons.no_valid_article_available)
preserve_model.save()
+ self.save_articles_list(articles_list, preserve_model)
+
except (PreservationException, Exception) as exp:
# ~~-> PreservationException:Exception~~
preserve_model.failed(str(exp))
@@ -304,6 +338,8 @@ def save_articles_list(self, articles_list: ArticlesList, model: PreservationSta
model.unbagged_articles(articles_list.unbagged_articles())
if len(articles_list.no_files_articles()) > 0:
model.no_files_articles(articles_list.no_files_articles())
+ if len(articles_list.uploaded_journals()) > 0:
+ model.uploaded_journals(articles_list.uploaded_journals())
model.save()
def cleanup(self):
@@ -344,8 +380,7 @@ def validate_response(self, response, tar_file, sha256, model):
if res_filename and res_filename == tar_file:
if res_shasum and res_shasum == sha256:
- app.logger.info("successfully uploaded")
- model.uploaded_to_ia()
+ app.logger.info("successfully uploaded " + tar_file)
else:
model.failed(FailedReasons.checksum_doesnot_match)
else:
@@ -378,7 +413,7 @@ def validate_response(self, response, tar_file, sha256, model):
model.save()
else:
- app.logger.error(f"Upload failed {response.text}")
+ app.logger.error(f"Upload failed for {tar_file}. Reason - {response.text}")
model.failed(response.text)
model.save()
@@ -534,11 +569,13 @@ def create_package_structure(self) -> ArticlesList:
# Fetch identifiers at the root directory
if os.path.dirname(dir) == self.__local_dir:
- if Preservation.IDENTIFIERS_CSV in files:
- # Get articles info from csv file
- # ~~-> CSVReader:Feature~~
- csv_reader = CSVReader(os.path.join(dir, Preservation.IDENTIFIERS_CSV))
- self.__csv_articles_dict = csv_reader.articles_info()
+ for file in files:
+ if Preservation.IDENTIFIERS_CSV.lower() == file.lower():
+ # Get articles info from csv file
+ # ~~-> CSVReader:Feature~~
+ csv_reader = CSVReader(os.path.join(dir, file))
+ self.__csv_articles_dict = csv_reader.articles_info()
+ break
# process only the directories that has articles
else:
self.__process_article(dir, files, articles_list)
@@ -557,10 +594,12 @@ def __process_article(self, dir_path, files, articles_list):
return
# check if identifier file exist
- if Preservation.IDENTIFIER_FILE in files:
- with open(os.path.join(dir_path, Preservation.IDENTIFIER_FILE)) as file:
- identifiers = file.read().splitlines()
- elif self.__csv_articles_dict:
+ for file in files:
+ if Preservation.IDENTIFIER_FILE.lower() == file.lower():
+ with open(os.path.join(dir_path, file)) as identifier_file:
+ identifiers = identifier_file.read().splitlines()
+
+ if not identifiers and self.__csv_articles_dict:
if dir_name in self.__csv_articles_dict:
identifiers = self.__csv_articles_dict[dir_name]
@@ -570,10 +609,9 @@ def __process_article(self, dir_path, files, articles_list):
if article:
article_data = article.data
- if not self.owner_of_article(article):
- articles_list.add_unowned_articles(package)
+ is_owner = self.owner_of_article(article)
- else:
+ if is_owner is True:
issn, article_id, metadata_json = self.get_article_info(article_data)
try:
package = ArticlePackage(dir_path, files)
@@ -584,10 +622,17 @@ def __process_article(self, dir_path, files, articles_list):
package.create_article_bagit_structure()
+ # Create or append to the per-journal identifier file listing all articles in the journal
+ with open(os.path.join(self.__preservation_dir, issn + ".txt"), 'a') as identifier_file:
+ identifier_file.write(os.path.basename(dir_path) + "," + article_id + "," +
+ ','.join(identifiers) + "\n")
+
articles_list.add_successful_article(package)
except Exception:
articles_list.add_unbagged_articles(package)
app.logger.exception(f"Error while create article ( {article_id} ) package")
+ else:
+ articles_list.add_unowned_articles(package)
else:
# skip the article if not found
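
For orientation, each line appended to the per-journal identifier file above takes the shape (all values hypothetical):

    article-folder-name,article-id,identifier-1,identifier-2

that is, the article's directory name, its DOAJ article id, then the identifiers read from the article's identifier file or from identifiers.csv.
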
@@ -677,11 +722,20 @@ class PreservationPackage:
Creates a preservation package and uploads it to the Internet Archive
"""
- def __init__(self, directory, owner):
- self.package_dir = directory
- self.tar_file = self.package_dir + ".tar.gz"
+ def __init__(self, preservation_dir, journal_dir, owner):
+ self.preservation_dir = preservation_dir
+ self.journal_dir = journal_dir
+ self.package_dir = os.path.join(self.preservation_dir, journal_dir)
+ self.created_time = dates.now_str("%Y-%m-%d-%H-%M-%S")
+ self.tar_file = self.package_dir + "_" + self.created_time + ".tar.gz"
self.tar_file_name = os.path.basename(self.tar_file)
self.__owner = owner
+ self.identifier_file = self.package_dir + "_" + self.created_time + ".txt"
+ try:
+ # Rename the identifier file to match the tar file
+ shutil.move(self.package_dir + ".txt", self.identifier_file)
+ except Exception as e:
+ app.logger.exception(e)
def create_package(self):
"""
@@ -697,7 +751,7 @@ def create_package(self):
return self.tar_file_name
- def upload_package(self, sha256sum):
+ def upload_package(self, sha256sum, file):
url = app.config.get("PRESERVATION_URL")
username = app.config.get("PRESERVATION_USERNAME")
@@ -707,7 +761,7 @@ def upload_package(self, sha256sum):
collection = params[0]
collection_id = params[1]
- file_name = os.path.basename(self.tar_file)
+ file_name = os.path.basename(file)
# payload for upload request
payload = {
@@ -727,7 +781,7 @@ def upload_package(self, sha256sum):
headers = {}
# get the file to upload
try:
- with open(self.tar_file, "rb") as f:
+ with open(file, "rb") as f:
files = {'file_field': (file_name, f)}
response = requests.post(url, headers=headers, auth=(username, password), files=files, data=payload)
except (IOError, Exception) as exp:
@@ -736,13 +790,13 @@ def upload_package(self, sha256sum):
return response
- def sha256(self):
+ def sha256(self, file):
"""
Creates a sha256 hash for the given file
"""
sha256_hash = hashlib.sha256()
- with open(self.tar_file, "rb") as f:
+ with open(file, "rb") as f:
# Read and update hash string value in blocks of 64K
for byte_block in iter(lambda: f.read(65536), b""):
sha256_hash.update(byte_block)
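
As an illustration of the naming scheme PreservationPackage now uses, a minimal sketch (the paths and journal directory name are hypothetical, and datetime stands in for portality's dates helper):

    import os
    from datetime import datetime

    preservation_dir = "/tmp/preservation"   # hypothetical
    journal_dir = "1234-5678"                # hypothetical journal directory name
    created_time = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

    package_dir = os.path.join(preservation_dir, journal_dir)
    tar_file = package_dir + "_" + created_time + ".tar.gz"      # the package that gets uploaded
    identifier_file = package_dir + "_" + created_time + ".txt"  # uploaded alongside, same timestamp
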
diff --git a/portality/templates/account/forgot.html b/portality/templates/account/forgot.html
index 241525adfd..d8f5e9c837 100644
--- a/portality/templates/account/forgot.html
+++ b/portality/templates/account/forgot.html
@@ -3,7 +3,7 @@
{% block page_title %}Reset your password{% endblock %}
{% block content %}
-
+
-
-
- {# this next bit has to be all on one line so that the spacing is correct #}
- {% if bibjson.pissn %}{{bibjson.pissn}} (Print){% endif %}{% if bibjson.eissn %}{% if bibjson.pissn %} / {% endif %}{{bibjson.eissn}} (Online){% endif %}
-
-
-
- {% if bibjson.discontinued_date is not none and bibjson.discontinued_date | is_in_the_past %}
- Ceased publication on {{ bibjson.discontinued_datestamp.strftime("%d %B %Y") }}
as {% if bibjson.apc_url %}
+ <a href="{{ bibjson.apc_url }}">
+ {% endif %}
+ publication fees{% if bibjson.apc_url %}</a>{% endif %} (article processing charges
+ or APCs)
+ {%- if bibjson.has_other_charges %}
+ and there are
+ {% if bibjson.other_charges_url %}
+ <a href="{{ bibjson.other_charges_url }}">
+ other charges
+ </a>
+ {%- else %}
+ other charges
+ {%- endif -%}
+ {% endif -%}
+ .
{% else %}
- {{ bibjson.title }}, ISSN: {{ bibjson.get_preferred_issn() }} (not available in DOAJ)
+ There are no publication fees (article processing
+ charges or APCs) to publish with this journal
+ {%- if bibjson.has_other_charges %}
+ but there are
+ {% if bibjson.other_charges_url %}
+ <a href="{{ bibjson.other_charges_url }}">
+ other charges
+ </a>
+ {% else %}
+ other charges
+ {% endif %}
+ {% endif -%}
+ .
- There are no publication fees (article processing charges or APCs) to publish with this journal
- {%- if bibjson.has_other_charges %}
- but there are
- {% if bibjson.other_charges_url %}
- other charges
- {% else %}
- other charges
- {% endif %}
- {% endif -%}
- .
This journal uses
- {% for license in bibjson.licenses %}
- {%- if loop.last and bibjson.licenses|length > 1 -%} or
- {%- elif not loop.first -%}, {%- endif -%}
- {% if license.type == "Publisher's own license" %} their publisher’s own
- {% else %} a {{ license.type }}
- {%- endif -%}
- {% endfor %}
- license.
-
- {% for policy in bibjson.deposit_policy %}
- {% set policy_data = DEPOSIT_POLICY_MAP.get(policy) %}
-
- {# FIXME: not a big fan of this hard-coding, presumably this could come from config somewhere #}
- {% if policy == "Sherpa/Romeo" or policy == "Diadorim" or policy == "Dulcinea" %}
-
- {{ policy }}
-
- {% else %}
- {{ policy }}
- {% endif %}
-
+
+
+ {# this next bit has to be all on one line so that the spacing is correct #}
+ {% if bibjson.pissn %}{{bibjson.pissn}} (Print){% endif %}{% if bibjson.eissn %}{% if bibjson.pissn %} / {% endif %}{{bibjson.eissn}} (Online){% endif %}
+
+
+
+ {% if bibjson.discontinued_date is not none and bibjson.discontinued_date | is_in_the_past %}
+ Ceased publication on {{ bibjson.discontinued_datestamp.strftime("%d %B %Y") }}
+ {% endif %}
+
+ {% set past = journal.get_past_continuations() %}
+ {% if past %}
+ Continues
+ {% for p in past %}
+ {% set bibjson = p.bibjson() %}
+ {% if bibjson.issns()|length > 0 %}
+ {% if p.is_in_doaj() %}
+ {{ bibjson.title }}
+ {% else %}
+ {{ bibjson.title }}, ISSN: {{ bibjson.get_preferred_issn() }} (not available in DOAJ)
+ {% endif %}
+ {% endif %}
+ {% if not loop.last %}; {% endif %}
+ {% endfor %}
+
+ {% endif %}
+
+ {% set future = journal.get_future_continuations() %}
+ {% if future %}
+ Continued by
+ {% for f in future %}
+ {% set bibjson = f.bibjson() %}
+ {% if bibjson.issns()|length > 0 %}
+ {% if f.is_in_doaj() %}
+ {{ bibjson.title }}
+ {% else %}
+ {{ bibjson.title }}, ISSN: {{ bibjson.get_preferred_issn() }} (not available in DOAJ)
+ {% endif %}
+ {% endif %}
+ {% if not loop.last %}; {% endif %}
+ {% endfor %}
+
There are three ways to upload article metadata to DOAJ:
@@ -20,6 +20,8 @@
Uploading metadata/article content in a file
There are instructions on how to prepare and upload your XML file on our XML documentation page.
+ Are you receiving an error about one of your ISSNs that you haven't seen before? We recently changed the rules for uploading article metadata. We now require that the Print ISSN is in an issn tag and the Online ISSN is in an eissn tag. [See our sample XML file](https://doaj.org/docs/xml/#example-doaj-xml-file) for more information.
+
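
For illustration, a minimal sketch of that tag layout built with Python's ElementTree; the element names come from the note above, while the surrounding record element and the ISSN values are assumptions:

    import xml.etree.ElementTree as ET

    record = ET.Element("record")
    ET.SubElement(record, "issn").text = "1234-5678"   # Print ISSN in an issn tag
    ET.SubElement(record, "eissn").text = "8765-4321"  # Online ISSN in an eissn tag
    print(ET.tostring(record, encoding="unicode"))
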
Failed XML uploads explained
This section explains the error messages that you may see when you upload article XML. Use the message in the 'Notes' column of your History of uploads table to correct your XML.
@@ -204,10 +206,17 @@
Failed XML uploads explained
A journal may have two ISSNs: an ISSN for the print version and an ISSN for the electronic version. Sometimes the ISSNs of the journal have changed.
The print and online ISSNs you have supplied are identical. If you supply 2 ISSNs they must be different: an ISSN for the print version and an ISSN for the electronic version.
+ ISSNs provided don't match any journal. We do not have a record of one or both of those ISSNs in DOAJ.
+
Check that all the Article ISSNs in the file are correct
+ Check that the journal to which you are trying to upload article metadata is indexed in DOAJ.
+
+ Check that both ISSNs in the metadata appear on the DOAJ journal record.
+
If you need to have the ISSNs of your DOAJ record updated, please contact us and we will check that the ISSNs are registered at the ISSN Portal and will then update the record accordingly.
If you believe all the ISSNs for the articles are correct, please contact us with the relevant details.
diff --git a/portality/templates/publisher/preservation.html b/portality/templates/publisher/preservation.html
index c87a2fcb97..d4bcb03722 100644
--- a/portality/templates/publisher/preservation.html
+++ b/portality/templates/publisher/preservation.html
@@ -25,10 +25,10 @@
Guidance before uploading your file
Only the full text of articles whose metadata is already uploaded to DOAJ can be sent to us. Check that your article metadata appears in DOAJ first.
Only articles for journals indexed in DOAJ can be uploaded.
- Collect the full texts into a package consisting of folders and files.
- Compress the package into a ZIP file.
- Upload the zipped package (on this page).
- Check that the file has uploaded correctly in the History of Uploads section and is not bigger than 50MB.
+ Collect the full text files into a package containing folders and files.
+ Compress the package into a ZIP file. Keep the name of the file simple: avoid spaces, hyphens, underscores, special characters, etc.
+ Upload the zipped package (on this page). It may not be bigger than 50MB.
+ Check that the file has uploaded correctly in the History of Uploads section.
The package must have the following structure:
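
One plausible layout, inferred from the identifier handling in preservation.py above (all names hypothetical):

    your-package.zip
    └── your-package/
        ├── identifiers.csv        # optional: article info for folders without identifier.txt
        ├── article-folder-1/
        │   ├── fulltext.pdf
        │   └── identifier.txt     # one identifier (e.g. a DOI) per line
        └── article-folder-2/
            └── fulltext.pdf
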
@@ -144,6 +144,10 @@
History of uploads (showing last {{previous|length}})
{% endif %}
{% if file.status == "uploaded" or file.status == "partial" and file.articles_info %}
+ {% if file.articles_info.uploaded_journals %}
+ Uploaded packages
+ {{file.articles_info.uploaded_journals}}
+ {% endif %}
{% if file.articles_info.successful_articles %}
{% include "includes/_hotjar.html" %}
{% endblock %}
diff --git a/portality/ui/messages.py b/portality/ui/messages.py
index ac7f9163bc..8eabd73f80 100644
--- a/portality/ui/messages.py
+++ b/portality/ui/messages.py
@@ -61,6 +61,7 @@ class Messages(object):
EXCEPTION_NO_CONTRIBUTORS_EXPLANATION = "DOAJ requires at least one author for each article."
EXCEPTION_TOO_MANY_ISSNS = "Too many ISSNs. Only 2 ISSNs are allowed: one Print ISSN and one Online ISSN."
+ EXCEPTION_MISMATCHED_ISSNS = "ISSNs provided don't match any journal."
EXCEPTION_ISSNS_OF_THE_SAME_TYPE = "Both ISSNs have the same type: {type}"
EXCEPTION_IDENTICAL_PISSN_AND_EISSN = "The Print and Online ISSNs supplied are identical. If you supply 2 ISSNs they must be different."
EXCEPTION_NO_ISSNS = "Neither Print ISSN nor Online ISSN has been supplied. DOAJ requires at least one ISSN."
diff --git a/portality/util.py b/portality/util.py
index 84423e1b91..2a4e1f36f3 100644
--- a/portality/util.py
+++ b/portality/util.py
@@ -186,3 +186,7 @@ def get_full_url_safe(endpoint):
except werkzeug.routing.BuildError:
app.logger.warning(f'endpoint not found -- [{endpoint}]')
return None
+
+def no_op(*args, **kwargs):
+ """ noop (no operation) function """
+ pass
\ No newline at end of file
diff --git a/portality/view/admin.py b/portality/view/admin.py
index 9e39473b03..010907c3b1 100644
--- a/portality/view/admin.py
+++ b/portality/view/admin.py
@@ -68,7 +68,7 @@ def journals_list():
try:
query = json.loads(request.values.get("q"))
except:
- app.logger.warn("Bad Request at admin/journals: " + str(request.values.get("q")))
+ app.logger.warning("Bad Request at admin/journals: " + str(request.values.get("q")))
abort(400)
# get the total number of journals to be affected
@@ -89,7 +89,7 @@ def journals_list():
try:
query = json.loads(request.data)
except:
- app.logger.warn("Bad Request at admin/journals: " + str(request.data))
+ app.logger.warning("Bad Request at admin/journals: " + str(request.data))
abort(400)
# get only the query part
@@ -123,7 +123,7 @@ def articles_list():
try:
query = json.loads(request.data)
except:
- app.logger.warn("Bad Request at admin/journals: " + str(request.data))
+ app.logger.warning("Bad Request at admin/journals: " + str(request.data))
abort(400)
# get only the query part
diff --git a/portality/view/doaj.py b/portality/view/doaj.py
index 5ac2f67f69..4baac018ec 100644
--- a/portality/view/doaj.py
+++ b/portality/view/doaj.py
@@ -198,8 +198,13 @@ def public_data_dump_redirect(record_type):
if not current_user.has_role(constants.ROLE_PUBLIC_DATA_DUMP):
abort(404)
- target_data = models.Cache.get_public_data_dump().get(record_type, {})
- if target_data is None:
+ # Make sure the PDD exists
+ pdd = models.Cache.get_public_data_dump()
+ if pdd is None:
+ abort(404)
+
+ target_data = pdd.get(record_type, {})
+ if not target_data:
abort(404)
main_store = store.StoreFactory.get(constants.STORE__SCOPE__PUBLIC_DATA_DUMP)
@@ -249,16 +254,7 @@ def autocomplete(doc_type, field_name):
# http://flask.pocoo.org/docs/security/#json-security
-@blueprint.route("/toc/")
-@blueprint.route("/toc//")
-@blueprint.route("/toc///")
-def toc(identifier=None, volume=None, issue=None):
- """ Table of Contents page for a journal. identifier may be the journal id or an issn """
- # If this route is changed, update JOURNAL_TOC_URL_FRAG in settings.py (partial ToC page link for journal CSV)
-
- journal = None
- issn_ref = False
-
+def find_toc_journal_by_identifier(identifier):
if identifier is None:
abort(404)
@@ -274,44 +270,40 @@ def toc(identifier=None, volume=None, issue=None):
if journal is None:
abort(400)
- issn_ref = True # just a flag so we can check if we were requested via issn
+ return journal
+
elif len(identifier) == 32:
js = models.Journal.pull(identifier) # Returns None on fail
if js is None or not js.is_in_doaj():
abort(404)
- journal = js
- else:
- abort(400)
+ return js
- # get the bibjson record that we're going to render
- bibjson = journal.bibjson()
+ abort(400)
- # The issn we are using to build the TOC
- issn = bibjson.get_preferred_issn()
- # now redirect to the canonical E-ISSN if one is available
+def is_issn_by_identifier(identifier):
+ return len(identifier) == 9
- if issn_ref: # the journal is referred to by an ISSN
+def find_correct_redirect_identifier(identifier, bibjson) -> str:
+ """
+ Return the identifier to redirect to, or None if the requested identifier
+ is already canonical and no redirect is needed.
+
+ :param identifier: the journal id or ISSN used in the request
+ :param bibjson: the journal's bibjson record
+ :return: the identifier to redirect to, or None
+ """
+ if is_issn_by_identifier(identifier): # the journal is referred to by an ISSN
# if there is an E-ISSN (and it's not the one in the request), redirect to it
eissn = bibjson.get_one_identifier(bibjson.E_ISSN)
if eissn and identifier != eissn:
- return redirect(url_for('doaj.toc', identifier=eissn, volume=volume, issue=issue), 301)
+ return eissn
# if there's no E-ISSN, but there is a P-ISSN (and it's not the one in the request), redirect to the P-ISSN
if not eissn:
pissn = bibjson.get_one_identifier(bibjson.P_ISSN)
if pissn and identifier != pissn:
- return redirect(url_for('doaj.toc', identifier=pissn, volume=volume, issue=issue), 301)
-
- # Add the volume and issue to query if present in path
- if volume:
- filters = [dao.Facetview2.make_term_filter('bibjson.journal.volume.exact', volume)]
- if issue:
- filters += [dao.Facetview2.make_term_filter('bibjson.journal.number.exact', issue)]
- q = dao.Facetview2.make_query(filters=filters)
-
- return redirect(url_for('doaj.toc', identifier=issn) + '?source=' + dao.Facetview2.url_encode_query(q))
+ return pissn
# The journal has neither a P-ISSN nor an E-ISSN. Yet somehow
# issn_ref is True, the request was referring to the journal
@@ -328,22 +320,52 @@ def toc(identifier=None, volume=None, issue=None):
if not issn:
issn = bibjson.get_one_identifier(bibjson.P_ISSN)
if issn:
- return redirect(url_for('doaj.toc', identifier=issn, volume=volume, issue=issue), 301)
+ return issn
# let it continue loading if we only have the hex UUID for the journal (no ISSNs)
# and the user is referring to the toc page via that ID
- # get the continuations for this journal, future and past
- future_journals = journal.get_future_continuations()
- past_journals = journal.get_past_continuations()
+@blueprint.route("/toc/")
+def toc(identifier=None):
+ """ Table of Contents page for a journal. identifier may be the journal id or an issn """
+ # If this route is changed, update JOURNAL_TOC_URL_FRAG in settings.py (partial ToC page link for journal CSV)
+
+ journal = find_toc_journal_by_identifier(identifier)
+ bibjson = journal.bibjson()
+ real_identifier = find_correct_redirect_identifier(identifier, bibjson)
+ if real_identifier:
+ return redirect(url_for('doaj.toc', identifier=real_identifier), 301)
+ else:
+ # now render all that information
+ return render_template('doaj/toc.html', journal=journal, bibjson=bibjson)
+
+
+@blueprint.route("/toc/articles/")
+@blueprint.route("/toc/articles//")
+@blueprint.route("/toc/articles///")
+def toc_articles(identifier=None, volume=None, issue=None):
+ journal = find_toc_journal_by_identifier(identifier)
+ bibjson = journal.bibjson()
+ real_identifier = find_correct_redirect_identifier(identifier, bibjson)
+ if real_identifier:
+ return redirect(url_for('doaj.toc_articles', identifier=real_identifier,
+ volume=volume, issue=issue), 301)
+ else:
+
+ if is_issn_by_identifier(identifier) and volume:
+ filters = [dao.Facetview2.make_term_filter('bibjson.journal.volume.exact', volume)]
+ if issue:
+ filters += [dao.Facetview2.make_term_filter('bibjson.journal.number.exact', issue)]
+ q = dao.Facetview2.make_query(filters=filters)
+
+ # The issn we are using to build the TOC
+ issn = bibjson.get_preferred_issn()
+ return redirect(url_for('doaj.toc', identifier=issn)
+ + '?source=' + dao.Facetview2.url_encode_query(q))
- # extract the bibjson, which is what the template is after, and whether the record is in doaj
- #future = [j.bibjson() j for j in future_journals]
- #past = [j.bibjson() for j in past_journals]
+ # now render all that information
+ return render_template('doaj/toc_articles.html', journal=journal, bibjson=bibjson)
- # now render all that information
- return render_template('doaj/toc.html', journal=journal, bibjson=bibjson, future=future_journals, past=past_journals,
- toc_issns=journal.bibjson().issns())
#~~->Article:Page~~
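
A hedged sketch of the resulting routing behaviour; it assumes the Flask app object lives in portality.app, and the ISSN and volume/issue values are hypothetical:

    from portality.app import app  # assumption: DOAJ's Flask app object

    with app.test_client() as client:
        # /toc/<identifier> now renders journal details only; requesting a
        # non-canonical ISSN 301-redirects to the canonical one
        print(client.get("/toc/1234-5678").status_code)

        # volume/issue paths live under /toc/articles/... and redirect to
        # /toc/<issn>?source=<encoded query> carrying the volume/issue filters
        print(client.get("/toc/articles/1234-5678/5/2").status_code)
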
diff --git a/portality/view/oaipmh.py b/portality/view/oaipmh.py
index 5006c13f02..73a5158dfe 100644
--- a/portality/view/oaipmh.py
+++ b/portality/view/oaipmh.py
@@ -305,7 +305,7 @@ def get_record(dao, base_url, specified_oai_endpoint, identifier=None, metadata_
def identify(dao, base_url):
repo_name = app.config.get("SERVICE_NAME")
- admin_email = app.config.get("ADMIN_EMAIL")
+ admin_email = app.config.get("OAI_ADMIN_EMAIL", app.config.get("ADMIN_EMAIL"))
idobj = Identify(base_url, repo_name, admin_email)
idobj.earliest_datestamp = dao.earliest_datestamp()
return idobj
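
In a deployment override this might look like (address hypothetical):

    # in a local *.cfg override; Identify falls back to ADMIN_EMAIL when this is unset
    OAI_ADMIN_EMAIL = 'oai-feedback@example.com'
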
diff --git a/portality/view/publisher.py b/portality/view/publisher.py
index 2410a8323a..dfa76e7d07 100644
--- a/portality/view/publisher.py
+++ b/portality/view/publisher.py
@@ -4,7 +4,7 @@
from portality.app_email import EmailException
from portality import models
-from portality.bll.exceptions import AuthoriseException, ArticleMergeConflict, DuplicateArticleException
+from portality.bll.exceptions import AuthoriseException, ArticleMergeConflict, DuplicateArticleException, ArticleNotAcceptable
from portality.decorators import ssl_required, restrict_to_role, write_required
from portality.dao import ESMappingMissingError
from portality.forms.application_forms import ApplicationFormFactory
@@ -290,8 +290,8 @@ def preservation():
# check if collection has been assigned for the user
# collection must be in the format {"user_id1": ["collection_name1", "collection_id1"],
# "user_id2": ["collection_name2", "collection_id2"]}
- collection_available = True
- collection_dict = app.config.get("PRESERVATION_COLLECTION")
+ collection_dict = app.config.get("PRESERVATION_COLLECTION", {})
+ collection_available = bool(collection_dict)
if collection_dict and not current_user.id in collection_dict:
collection_available = False
elif collection_dict:
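
A hedged example of the setting this code reads (the account id, collection name and collection id are hypothetical):

    PRESERVATION_COLLECTION = {
        "publisher_account_id": ["My IA Collection", "my-collection-id"],
    }
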
@@ -362,7 +362,8 @@ def metadata():
Messages.flash(Messages.ARTICLE_METADATA_MERGE_CONFLICT)
except DuplicateArticleException:
Messages.flash(Messages.ARTICLE_METADATA_UPDATE_CONFLICT)
-
+ except ArticleNotAcceptable as e:
+ Messages.flash_with_param(e.message, "error")
return fc.render_template(validated=validated)
diff --git a/production.cfg b/production.cfg
index e359b15c18..19ad96abad 100644
--- a/production.cfg
+++ b/production.cfg
@@ -3,11 +3,9 @@
ELASTICSEARCH_HOSTS = [{'host': '10.131.191.132', 'port': 9200}, {'host': '10.131.191.133', 'port': 9200}]
INDEX_PER_TYPE_SUBSTITUTE = '_doc'
- # doaj-public-app-1 doaj-background-app-1
-APP_MACHINES_INTERNAL_IPS = ['10.131.191.139:5050', '10.131.12.33:5050']
+ # doaj-public-app-1 doaj-background-app-1 doaj-editor-app-1
+APP_MACHINES_INTERNAL_IPS = ['10.131.191.139:5050', '10.131.12.33:5050', '10.131.56.133:5050']
- # doaj-public-app-1 doaj-bg-app-1 doaj-background-app-1
-#APP_MACHINES_INTERNAL_IPS = ['10.131.191.139:5050', '10.131.56.133:5050', '10.131.12.33:5050']
# The app is served via nginx / cloudlflare - they handle SSL
SSL = False
diff --git a/setup.py b/setup.py
index 8f511acaa3..e67ca4e4af 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
setup(
name='doaj',
- version='6.3.15',
+ version='6.4.5',
packages=find_packages(),
install_requires=[
"awscli==1.20.50",