MNT/STY/TST: Add pre-commit hooks, CI job, standardize style #183

Merged 7 commits on Apr 26, 2024
23 changes: 23 additions & 0 deletions .flake8
@@ -0,0 +1,23 @@
[flake8]
exclude = .git,__pycache__,build,dist,
max-line-length = 88
select = C,E,F,W,B,B950
extend-ignore = E203, E501, E226, W503, W504

# Explanation section:
# B950
# This takes into account max-line-length but only triggers when the value
# has been exceeded by more than 10% (96 characters).
# E203: Whitespace before ':'
# This is recommended by black in relation to slice formatting.
# E501: Line too long (82 > 79 characters)
# Our line length limit is 88 (above 79 defined in E501). Ignore it.
# E226: Missing whitespace around arithmetic operator
# This is a stylistic choice which you'll find everywhere in pcdsdevices, for
# example. Formulas can be easier to read when operators and operands
# have no whitespace between them.
#
# W503: Line break occurred before a binary operator
# W504: Line break occurred after a binary operator
# flake8 wants us to choose one of the above two. Our choice
# is to make no decision.
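
A quick illustration of the combined effect (a hypothetical snippet, not part of this diff; all names are made up): under this configuration, both of the following pass flake8.

# E226 ignored: formula-style spacing with no whitespace around
# arithmetic operators is allowed.
width, height, margin = 4, 3, 1
area = width*height + 2*margin

# W503/W504 both ignored: breaking either before or after a binary
# operator is accepted.
total = (area
         + margin)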
53 changes: 53 additions & 0 deletions .github/workflows/standard.yml
@@ -0,0 +1,53 @@
name: PCDS Standard Testing

on:
  push:
  pull_request:
  release:
    types:
      - created

jobs:
  get_changed_files:
    name: "get_changed_files"
    runs-on: ubuntu-20.04
    outputs:
      PROJECT_FILES: ${{ steps.changed_files.outputs.PROJECT_FILES }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: 'recursive'

      - name: Find all changed files
        id: changed_files
        if: ${{ github.event_name == 'pull_request' }}
        run: |
          # Fancy method of saying:
          # * What files changed from this PR (the current checked out HEAD)
          # * To the base that we're merging into (e.g., origin/master)
          # Note that ACM = added/copied/modified
          # See more in `man git-diff-tree` under `--diff-filter`
          git diff-tree --no-commit-id --name-only --diff-filter=ACM -r -z origin/${{ github.base_ref }} HEAD \
            | tee "$HOME/project_files.txt"

          if [ ! -s "$HOME/project_files.txt" ]; then
            echo "No source code files changed in this PR. Checking the entire repository."
            find "." -print0 -type f \
              > "$HOME/project_files.txt"
          fi

          # replace nulls with spaces
          sed -i 's/\x0/ /g' $HOME/project_files.txt

          # set output
          echo "PROJECT_FILES=$(<$HOME/project_files.txt)" >> "$GITHUB_OUTPUT"

  pre-commit:
    needs:
      - get_changed_files
    name: 'pre-commit checks'
    uses: pcdshub/pcds-ci-helpers/.github/workflows/pre-commit.yml@master
    with:
      args: "--files ${{ needs.get_changed_files.outputs.PROJECT_FILES }}"
34 changes: 34 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,34 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: no-commit-to-branch
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-ast
      - id: check-case-conflict
      - id: check-json
      - id: check-merge-conflict
      - id: check-symlinks
      - id: check-xml
      - id: check-yaml
        exclude: '^(conda-recipe/meta.yaml)$'
      - id: debug-statements

  - repo: https://github.com/shellcheck-py/shellcheck-py
    rev: v0.10.0.1
    hooks:
      - id: shellcheck
        args: [-x]  # allow source files outside of checked files

  - repo: https://github.com/pycqa/flake8.git
    rev: 6.0.0
    hooks:
      - id: flake8

  - repo: https://github.com/timothycrosley/isort
    rev: 5.11.5
    hooks:
      - id: isort
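
Developers would typically exercise these hooks locally with `pre-commit install` followed by `pre-commit run`; a minimal Python wrapper sketch (an illustration only, assuming pre-commit is installed in the active environment):

import subprocess

# Register the hooks with git so they run on every commit.
subprocess.run(["pre-commit", "install"], check=True)

# Run all configured hooks against the whole tree, similar to what the
# CI job does when no changed files are detected.
subprocess.run(["pre-commit", "run", "--all-files"], check=True)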
1 change: 1 addition & 0 deletions .shellcheckrc
@@ -0,0 +1 @@
disable=SC1091 # allow sourcing files that can't immediately be found (pcds_conda)
3 changes: 2 additions & 1 deletion scripts/BLmotors
@@ -13,8 +13,9 @@ The script will generate a report for:
 3. If the current configuration matches or not with the
    appropriate values.
 """
-import os
 import argparse
+import os
+
 import ophyd.signal
 from pmgr import pmgrAPI
 from prettytable import PrettyTable
49 changes: 26 additions & 23 deletions scripts/detector_totals.py
@@ -3,16 +3,16 @@
 Script for the detector group to pull totals out of the elog.
 We get DAQ run parameters for all experiments and count them up.
 """
-import sys
-import logging
 import argparse
+import logging
+import sys
 from collections import OrderedDict
-import requests
-import pytz
+
 import dateutil.parser as dateparser
+import pytz
+import requests
 from krtc import KerberosTicket
-
 
 logger = logging.getLogger(__name__)
 krbheaders = KerberosTicket("HTTP@pswww.slac.stanford.edu").getAuthHeaders()
 tz = pytz.timezone('America/Los_Angeles')
@@ -57,10 +57,11 @@ def getExperiments(run_period, after, before):
     except those whose first run is after the specified time
     """
     resp = requests.get(
-            f"{lgbkprefix}/ws/experiments",
-            params={"categorize": "instrument_runperiod",
-                    "sortby": "name"},
-            headers=krbheaders).json()
+        f"{lgbkprefix}/ws/experiments",
+        params={"categorize": "instrument_runperiod",
+                "sortby": "name"},
+        headers=krbheaders
+    ).json()
     exps = []
     for k, v in resp["value"].items():
         insexps = map(lambda x: x["_id"], v.get("Run " + str(run_period), []))
@@ -73,27 +74,29 @@ def first_run_exists(exp):
         return exp.get("first_run", {}).get("begin_time", None)
 
     def last_run_before_specified_after(exp):
-        return exp.get("last_run", {}).get("begin_time", None) \
-            and dateparser.parse(exp["last_run"]["begin_time"])\
-            .astimezone(tz) < after
+        return (exp.get("last_run", {}).get("begin_time", None)
+                and dateparser.parse(exp["last_run"]["begin_time"])
+                .astimezone(tz) < after)
 
     def first_run_after_specified_before(exp):
-        return exp.get("first_run", {}).get("begin_time", None) \
-            and dateparser.parse(exp["first_run"]["begin_time"])\
-            .astimezone(tz) > before
+        return (exp.get("first_run", {}).get("begin_time", None)
+                and dateparser.parse(exp["first_run"]["begin_time"]).astimezone(tz) > before)
 
     sef = None
     if after and before:
-        sef = lambda x: last_run_exists(x) \
-            and not last_run_before_specified_after(x) \
-            and first_run_exists(x) \
-            and not first_run_after_specified_before(x)
+        def sef(x):
+            return (last_run_exists(x)
+                    and not last_run_before_specified_after(x)
+                    and first_run_exists(x)
+                    and not first_run_after_specified_before(x))
     elif after:
-        sef = lambda x: last_run_exists(x) \
-            and not last_run_before_specified_after(x)
+        def sef(x):
+            return (last_run_exists(x)
+                    and not last_run_before_specified_after(x))
     elif before:
-        sef = lambda x: first_run_exists(x) \
-            and not first_run_after_specified_before(x)
+        def sef(x):
+            return (first_run_exists(x)
+                    and not first_run_after_specified_before(x))
 
     if sef:
         expsset = set(exps)
40 changes: 23 additions & 17 deletions scripts/elog_par_post
@@ -1,45 +1,50 @@
 #!/usr/bin/env python
-import requests
-from requests.auth import HTTPBasicAuth
 import argparse
 import socket
+
+import requests
+from requests.auth import HTTPBasicAuth
+
 
 def readRunTable(fname):
-    runTableData={}
+    runTableData = {}
     with open(fname) as reader:
         lines = reader.readlines()
         for line in lines:
             try:
                 print(line)
-                runTableData[line.split()[0]]=line.split()[1]
-            except:
+                runTableData[line.split()[0]] = line.split()[1]
+            except Exception:
                 pass
     return runTableData
 
+
 def postRunTableDict(runtable_data, experiment, run):
-    args_url="https://pswww.slac.stanford.edu/ws-auth/lgbk/"
+    args_url = "https://pswww.slac.stanford.edu/ws-auth/lgbk/"
     ws_url = args_url + "/run_control/{0}/ws/add_run_params".format(experiment)
-    print('URL:',ws_url)
-    user=experiment[:3]+'opr'
-    elogPostFile='/cds/home/opr/%s/forElogPost.txt'%user
-    hostname=socket.gethostname()
-    if hostname.find('sdf')>=0:
-        elogPostFile='/sdf/group/lcls/ds/tools/forElogPost.txt'
-    with open(elogPostFile,'r') as reader:
+    print('URL:', ws_url)
+    user = experiment[:3] + 'opr'
+    elogPostFile = '/cds/home/opr/%s/forElogPost.txt' % user
+    hostname = socket.gethostname()
+    if hostname.find('sdf') >= 0:
+        elogPostFile = '/sdf/group/lcls/ds/tools/forElogPost.txt'
+    with open(elogPostFile, 'r') as reader:
         answer = reader.readline()
-    r = requests.post(ws_url, params={"run_num": run}, json=runtable_data, \
-                      auth=HTTPBasicAuth(experiment[:3]+'opr', answer[:-1]))
+    r = requests.post(ws_url, params={"run_num": run}, json=runtable_data,
+                      auth=HTTPBasicAuth(experiment[:3] + 'opr', answer[:-1]))
     print(r)
 
+
 def postRunTable(fname, experiment, run):
     print(fname, experiment, run)
-    if fname=='pedestal':
-        dataDict={'pedestal':'done'}
+    if fname == 'pedestal':
+        dataDict = {'pedestal': 'done'}
     else:
         dataDict = readRunTable(fname)
     print(dataDict)
     postRunTableDict(dataDict, experiment, run)
 
+
 def main():
     parser = argparse.ArgumentParser(description=__doc__)
     parser.add_argument('-r', '--run', help='run number',
@@ -51,5 +56,6 @@ def main():
 
     postRunTable(args.file, args.experiment, args.run)
 
+
 if __name__ == '__main__':
     main()
7 changes: 4 additions & 3 deletions scripts/epicsArchChecker
@@ -9,8 +9,9 @@ of reports:
 3. PVs no connected.
 4. Files that do not exist.
 """
-import os
 import argparse
+import os
+
 import ophyd.signal
 from prettytable import PrettyTable

@@ -119,7 +120,7 @@ def create_Lists(entries):
         myPVs.append(entry[1])
         myFiles.append(entry[2])
         lineNumbers.append('№ ' + str(entry[3]))
-    return(myKeys, myPVs, myFiles, lineNumbers)
+    return (myKeys, myPVs, myFiles, lineNumbers)
 
 
 def find_index(myKeys, myPVs, myFiles):
@@ -146,7 +147,7 @@ def find_index(myKeys, myPVs, myFiles):
             if dmyPVs[dpv] == myPVs[pv]:
                 indPVs.append(pv)
 
-    return(indKeys, indPVs)
+    return (indKeys, indPVs)
 
 
 def report_duplicates(indKeys, indPVs, myKeys, myPVs, myFiles, numLines):
41 changes: 22 additions & 19 deletions scripts/epix_gains
@@ -1,29 +1,30 @@
 #!/usr/bin/env python
 
-import psana
-from Detector.UtilsEpix10ka import find_gain_mode
-from Detector.UtilsEpix10ka import info_pixel_gain_mode_statistics_for_raw
-import sys
 import argparse
+import sys
+
+import psana
+from Detector.UtilsEpix10ka import (find_gain_mode,
+                                    info_pixel_gain_mode_statistics_for_raw)
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--run', help='run', type=str)
 parser.add_argument('--experiment', help='experiment name', type=str)
 args = parser.parse_args()
 
-exprun = 'exp=%s:run=%s'%(args.experiment,args.run)
+exprun = 'exp=%s:run=%s' % (args.experiment, args.run)
 
 ds = psana.DataSource(exprun+':smd')
 
 aliases = []
 for dn in psana.DetNames():
-    if dn[1]!='':
+    if dn[1] != '':
         alias = dn[1]
     else:
         alias = dn[0]
-    if alias.find('10k')>0:
+    if alias.find('10k') > 0:
         aliases.append(alias)
-    if len(alias)==0:
+    if len(alias) == 0:
         print('did not find an epix10k, quit.')
         sys.exit()

@@ -33,29 +34,31 @@ for ialias, alias in enumerate(aliases):
     epixs.append(psana.Detector(alias))
 
 for run in ds.runs():
-    print 'run loop'
-    for nstep,step in enumerate(run.steps()):
+    print('run loop')
+    for nstep, step in enumerate(run.steps()):
         for epix in epixs:
-            print '*** config gain mode: %s \t'%epix.name, find_gain_mode(det=epix)
+            print('*** config gain mode: %s \t' % epix.name, find_gain_mode(det=epix))
         pvList = cd().pvLabels()
         for pv in pvList:
-            print 'Step',nstep,'name/value:',pv.name(),pv.value()
+            print('Step', nstep, 'name/value:', pv.name(), pv.value())
         found = False
         nbad = 0
-        raws=[]
+        raws = []
         for epix in epixs:
             raws.append(None)
-        for nevt,evt in enumerate(step.events()):
+        for nevt, evt in enumerate(step.events()):
             if not found:
-                for iepix,epix in enumerate(epixs):
+                for iepix, epix in enumerate(epixs):
                     found = True
                     if epix.raw(evt) is None:
                         nbad += 1
-                        raws[iepix]=None
+                        raws[iepix] = None
                         found = False
                     else:
-                        raws[iepix]=epix.raw(evt)
+                        raws[iepix] = epix.raw(evt)
             if found:
-                for iepix,epix in enumerate(epixs):
-                    print '--- event gain mode for %s \t'%epix.name,find_gain_mode(det=epix,data=raws[iepix]),info_pixel_gain_mode_statistics_for_raw(epix, raws[iepix])
+                for iepix, epix in enumerate(epixs):
+                    print('--- event gain mode for %s \t' % epix.name,
+                          find_gain_mode(det=epix, data=raws[iepix]),
+                          info_pixel_gain_mode_statistics_for_raw(epix, raws[iepix]))
     pass