-
Notifications
You must be signed in to change notification settings - Fork 7
117 lines (104 loc) · 2.98 KB
/
push-dataset.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
# Generates the jsonl dataset file for a datasource, then uploads it to the
# HuggingFace hub. Callable from other workflows (workflow_call) or run by hand.
name: Synch uploaded jsonl files to HuggingFace
on:
  workflow_call:
    inputs:
      datasource:
        type: string
        required: true
      coda_token:
        type: string
        required: true
      # Referenced by the "Generate dataset file" step; optional so existing
      # callers that omit it keep working (the repository secret is the fallback).
      youtube_api_key:
        type: string
        required: false
      db_user:
        type: string
        required: true
      db_password:
        type: string
        required: true
      db_host:
        type: string
        required: true
  workflow_dispatch: # allow manual triggering
    inputs:
      datasource:
        description: 'The datasource to process'
        type: choice
        default: all
        options:
          # "all" must appear here: a choice input's default has to be one of
          # its options, and the original list omitted it.
          - all
          - agentmodels
          - aiimpacts
          - aisafety.camp
          - aisafety.info
          - ai_alignment_playlist
          - ai_explained
          - ai_safety_talks
          - ai_safety_reading_group
          - ai_tech_tu_delft
          - alignmentforum
          - arbital
          - arxiv
          - carado.moe
          - cold_takes
          - deepmind_blog
          - deepmind_technical_blog
          - distill
          - eaforum
          - eleuther.ai
          - gdocs
          - generative.ink
          - gwern_blog
          - html_articles
          - importai
          - jsteinhardt_blog
          - lesswrong
          - markdown
          - miri
          - ml_safety_newsletter
          - openai.research
          - pdfs
          - rob_miles_ai_safety
          - vkrakovna_blog
          - yudkowsky_blog
          - xmls

jobs:
  generate-dataset:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        # v2 ran on the deprecated Node 12 runtime; v4 is the supported major.
        uses: actions/checkout@v4
      - name: Setup Python environment
        uses: actions/setup-python@v5
        with:
          python-version: '3.x'
      - name: Install dependencies
        run: pip install -r requirements.txt
      - name: Generate dataset file
        env:
          # Each credential prefers the repository secret and falls back to the
          # workflow_call input, so both trigger paths work.
          CODA_TOKEN: ${{ secrets.CODA_TOKEN || inputs.coda_token }}
          YOUTUBE_API_KEY: ${{ secrets.YOUTUBE_API_KEY || inputs.youtube_api_key }}
          ARD_DB_USER: ${{ secrets.ARD_DB_USER || inputs.db_user }}
          ARD_DB_PASSWORD: ${{ secrets.ARD_DB_PASSWORD || inputs.db_password }}
          ARD_DB_HOST: ${{ secrets.ARD_DB_HOST || inputs.db_host }}
          ARD_DB_NAME: alignment_research_dataset
        run: python main.py generate_jsonl_files ${{ inputs.datasource }}
      - name: Upload Artifact
        # v3 artifacts were deprecated/shut down; v4 artifacts are immutable,
        # which is fine here since the name is unique per datasource.
        uses: actions/upload-artifact@v4
        with:
          name: ${{ inputs.datasource }}
          path: data/${{ inputs.datasource }}.jsonl
          retention-days: 1

  upload:
    runs-on: ubuntu-latest
    needs: generate-dataset
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Setup Python environment
        uses: actions/setup-python@v5
        with:
          python-version: '3.x'
      - name: Install dependencies
        run: pip install -r requirements.txt
      - name: Setup Huggingface client
        run: pip install huggingface_hub gdown jsonlines datasets
      # NOTE(review): this job never downloads the artifact produced by
      # generate-dataset — presumably upload_to_huggingface.py fetches the data
      # itself (gdown suggests Google Drive); confirm, or add a download step.
      # NOTE(review): the token is passed as a CLI argument, which is visible in
      # runner process listings; consider passing it via an env var instead.
      - name: Upload files
        run: python upload_to_huggingface.py ${{ secrets.HUGGINGFACE_TOKEN }} ${{ inputs.datasource }}