From 7197291b5ccc245c7c5b0a7828fbaa8b2810031b Mon Sep 17 00:00:00 2001 From: tangkong Date: Wed, 24 Apr 2024 10:34:08 -0700 Subject: [PATCH 1/7] MNT: add pre-commit tooling and rc files --- .flake8 | 23 +++++++++++++++++++++++ .pre-commit-config.yaml | 34 ++++++++++++++++++++++++++++++++++ .shellcheckrc | 1 + 3 files changed, 58 insertions(+) create mode 100644 .flake8 create mode 100644 .pre-commit-config.yaml create mode 100644 .shellcheckrc diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..a1ee93df --- /dev/null +++ b/.flake8 @@ -0,0 +1,23 @@ +[flake8] +exclude = .git,__pycache__,build,dist, +max-line-length = 88 +select = C,E,F,W,B,B950 +extend-ignore = E203, E501, E226, W503, W504 + +# Explanation section: +# B950 +# This takes into account max-line-length but only triggers when the value +# has been exceeded by more than 10% (96 characters). +# E203: Whitespace before ':' +# This is recommended by black in relation to slice formatting. +# E501: Line too long (82 > 79 characters) +# Our line length limit is 88 (above 79 defined in E501). Ignore it. +# E226: Missing whitespace around arithmetic operator +# This is a stylistic choice which you'll find everywhere in pcdsdevices, for +# example. Formulas can be easier to read when operators and operands +# have no whitespace between them. +# +# W503: Line break occurred before a binary operator +# W504: Line break occurred after a binary operator +# flake8 wants us to choose one of the above two. Our choice +# is to make no decision. diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..e8582abb --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,34 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: no-commit-to-branch + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-ast + - id: check-case-conflict + - id: check-json + - id: check-merge-conflict + - id: check-symlinks + - id: check-xml + - id: check-yaml + exclude: '^(conda-recipe/meta.yaml)$' + - id: debug-statements + +- repo: https://github.com/shellcheck-py/shellcheck-py + rev: v0.10.0.1 + hooks: + - id: shellcheck + args: [-x] # allow source files outside of checked files + +- repo: https://github.com/pycqa/flake8.git + rev: 6.0.0 + hooks: + - id: flake8 + +- repo: https://github.com/timothycrosley/isort + rev: 5.11.5 + hooks: + - id: isort diff --git a/.shellcheckrc b/.shellcheckrc new file mode 100644 index 00000000..08c7738e --- /dev/null +++ b/.shellcheckrc @@ -0,0 +1 @@ +disable=SC1091 # allow sourcing files that can't immediately be found (pcds_conda) From 7e990f180360a63fc5cf457880a823d9c1429050 Mon Sep 17 00:00:00 2001 From: tangkong Date: Wed, 24 Apr 2024 10:34:49 -0700 Subject: [PATCH 2/7] TST: add pre-commit ci test --- .github/workflows/standard.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .github/workflows/standard.yml diff --git a/.github/workflows/standard.yml b/.github/workflows/standard.yml new file mode 100644 index 00000000..8f1f47aa --- /dev/null +++ b/.github/workflows/standard.yml @@ -0,0 +1,15 @@ +name: PCDS Standard Testing + +on: + push: + pull_request: + release: + types: + - created + +jobs: + pre-commit: + name: "pre-commit checks" + uses: pcdshub/pcds-ci-helpers/.github/workflows/pre-commit.yml@master + with: + args: "--all-files" From 64ec6c97c1fdb7706369a94e714429baf98dfbb7 Mon Sep 17 
00:00:00 2001 From: tangkong Date: Wed, 24 Apr 2024 10:36:09 -0700 Subject: [PATCH 3/7] MNT: format questionnaire_tools --- scripts/questionnaire_tools | 216 ++++++++++++++++++------------------ 1 file changed, 109 insertions(+), 107 deletions(-) diff --git a/scripts/questionnaire_tools b/scripts/questionnaire_tools index 8f8af525..eba250ed 100755 --- a/scripts/questionnaire_tools +++ b/scripts/questionnaire_tools @@ -1,8 +1,10 @@ #!/reg/g/pcds/pyps/conda/py36/envs/pcds-1.2.5/bin/python +import argparse +import json import sys + from psdm_qs_cli import QuestionnaireClient -import json -import argparse + class QuestionnaireTools(): @@ -10,58 +12,57 @@ class QuestionnaireTools(): self.qc = QuestionnaireClient(**kwargs) self.exp_dict = self.qc.getExpName2URAWIProposalIDs() self.deviceDict = json.loads(open("deviceDict.json").read()) - self.allowedLocations=["Hutch-main experimental", - "Hutch-downbeam area", - "XPP goniometer", - "XPP robot", - "XPP SB4", - "XPP alcove", - "XCS goniometer", - "XCS LADM", - "MFX detector table", - "MFX sample table", - "MFX robot", - "MFX DG3", - "CXI SC1", - "CXI SC2", - "CXI SC3", - "CXI DSA", - "CXI DSB", - "CXI DG3", - "CXI DG4", - "MEC chamber"] + self.allowedLocations = ["Hutch-main experimental", + "Hutch-downbeam area", + "XPP goniometer", + "XPP robot", + "XPP SB4", + "XPP alcove", + "XCS goniometer", + "XCS LADM", + "MFX detector table", + "MFX sample table", + "MFX robot", + "MFX DG3", + "CXI SC1", + "CXI SC2", + "CXI SC3", + "CXI DSA", + "CXI DSB", + "CXI DG3", + "CXI DG4", + "MEC chamber"] def _propRun_from_name(self, name): - if len(name)<=4: + if len(name) <= 4: try: proposal, runNo = self._propRun_from_propname(name) - except: - print('could not find experiment %s in database'%name) + except Exception: + print('could not find experiment %s in database' % name) sys.exit() else: try: proposal, runNo = self._propRun_from_expname(name) - except: - print('could not find experiment %s in database'%name) + except Exception: + print('could not find experiment %s in database' % name) sys.exit() return proposal, runNo - def _propRun_from_propname(self, propname): - runStrs=['run18','run17', 'run16', 'run15'] + runStrs = ['run18', 'run17', 'run16', 'run15'] for runStr in runStrs: if propname in self.qc.getProposalsListForRun(runStr).keys(): return propname, runStr def _propRun_from_expname(self, expname): try: - propDict=self.qc.lookupByExperimentName(expname) - if propDict!={}: - return propDict['proposal_id'],propDict['run_period'] - except: + propDict = self.qc.lookupByExperimentName(expname) + if propDict != {}: + return propDict['proposal_id'], propDict['run_period'] + except Exception: try: - return self.exp_dict[expname],'run%02d'%int(expname[-2:]) - except KeyError: + return self.exp_dict[expname], 'run%02d' % int(expname[-2:]) + except KeyError: err = '{} is not a valid experiment name' raise ValueError(err.format(expname)) @@ -72,57 +73,59 @@ class QuestionnaireTools(): return False def _getProposalList(self, runs=[18], instrument=None): - proposalList=[] + proposalList = [] for key in self.exp_dict.keys(): try: runNo = int(key[-2:]) if runNo not in runs: continue - if instrument is not None and keys[:3]!=instrument.lower(): + if instrument is not None and key[:3] != instrument.lower(): continue proposalList.append(self.exp_dict[key]) - except: - pass + except KeyError: + pass return proposalList def _getExperimentList(self, runs=[18], instrument=None): - proposalList=[] + proposalList = [] for key in self.exp_dict.keys(): try: runNo = 
int(key[-2:]) if runNo not in runs: continue - if instrument is not None and keys[:3]!=instrument.lower(): + if instrument is not None and key[:3] != instrument.lower(): continue proposalList.append(key) - except: - pass + except KeyError: + pass return proposalList - - def _getProposalDetails(self, proposal, run_no, keyFilter=['pcdssetup','hutch-be']): + + def _getProposalDetails(self, proposal, run_no, keyFilter=['pcdssetup', 'hutch-be']): if isinstance(run_no, int): - run_no='run%02d'%run_no + run_no = 'run%02d' % run_no if not self._isPropname(proposal): - print('This is not a proposal %s'%proposal) + print('This is not a proposal %s' % proposal) return raw = self.qc.getProposalDetailsForRun(run_no, proposal) - if keyFilter==[]: + if keyFilter == []: return raw filteredDict = {} for field in raw.keys(): for desiredKey in keyFilter: - if field.find(desiredKey)>=0: - filteredDict[field]=raw[field] + if field.find(desiredKey) >= 0: + filteredDict[field] = raw[field] return filteredDict def _setProposalDetails(self, runNo, proposal, detailDict): for key in detailDict: try: - print('update attribute %s to %s for proposal %s in %s'%(key, detailDict[key], proposal, runNo)) + print('update attribute %s to %s for proposal %s in %s' + % (key, detailDict[key], proposal, runNo)) self.qc.updateProposalAttribute(runNo, proposal, key, detailDict[key]) - except: - print('failed to update attribute %s to %s for proposal %s in %s'%(key, detailDict[key], proposal, runNo)) + except Exception: + print('failed to update attribute %s to %s for proposal %s in %s' + % (key, detailDict[key], proposal, runNo)) def copyProposal(self, expnameIn, expnameOut): proposalIn, runNoIn = self._propRun_from_name(expnameIn) @@ -132,15 +135,15 @@ class QuestionnaireTools(): self._setProposalDetails(runNoOut, proposalOut, proposalInDetails) def _parseDeviceName(self, inputDevice): - device={} + device = {} deviceInfo = inputDevice.split(',') - device['name']=deviceInfo.pop(0) + device['name'] = deviceInfo.pop(0) for info in deviceInfo: try: - field,value = info.split('-') - device[field]=value - except: - print('I cannot parse this, will ignore %s'%info) + field, value = info.split('-') + device[field] = value + except KeyError: + print('I cannot parse this, will ignore %s' % info) return device def addDeviceToProposal(self, expname, devicename, location=None): @@ -148,50 +151,56 @@ class QuestionnaireTools(): deviceSetup = self._parseDeviceName(devicename) if deviceSetup['name'] not in self.deviceDict.keys(): - print('device %s is not in predefined device list, available are:'%devicename) + print(f'device {devicename} is not in predefined device list, available are:') print(self.deviceDict.keys) addDevice = self.deviceDict[deviceSetup['name']] - #get current pcds fields from proposal. + # get current pcds fields from proposal. proposalDetails = self._getProposalDetails(proposal, runNo) if location is not None: - print(location); print(location in self.allowedLocations) + print(location) + print(location in self.allowedLocations) for lineItem in addDevice.keys(): - #first find last kind of this device in current list. - numNewDevice=0 + # first find last kind of this device in current list. 
+ numNewDevice = 0 for detail in proposalDetails.keys(): - if detail.find(lineItem)==0: - numDev = int(detail.replace('%s-'%lineItem,'').split('-')[0]) - if numDev>numNewDevice: numNewDevice=numDev+1 + if detail.find(lineItem) == 0: + numDev = int(detail.replace('%s-' % lineItem, '').split('-')[0]) + if numDev > numNewDevice: + numNewDevice = numDev + 1 for item in addDevice[lineItem]: - if lineItem=='pcdssetup-motors' and 'pvbase' not in item.keys(): - item['pvbase']='%s:USR:MMS:'%(expname[:3].upper()) + if lineItem == 'pcdssetup-motors' and 'pvbase' not in item.keys(): + item['pvbase'] = '%s:USR:MMS:' % (expname[:3].upper()) if 'location' in deviceSetup.keys() and \ deviceSetup['location'] in self.allowedLocations and \ - (lineItem=='pcdssetup-motors' or lineItem=='pcdssetup-areadet') \ + (lineItem == 'pcdssetup-motors' or lineItem == 'pcdssetup-areadet') \ and 'location' not in item.keys(): - item['location']=location + item['location'] = location if 'basename' in deviceSetup.keys(): - item['name']='%s_%s'%(deviceSetup['basename'],item['item']) + item['name'] = '%s_%s' % (deviceSetup['basename'], item['item']) for field in item.keys(): fieldValue = item[field] - fieldName = '%s-%d-%s'%(lineItem,numNewDevice,field) - print('update field %s to %s for proposal %s in run %s'%(fieldName, fieldValue,proposal, runNo)) - self.qc.updateProposalAttribute(runNo, proposal, fieldName, fieldValue) - numNewDevice=numNewDevice+1 - + fieldName = '%s-%d-%s' % (lineItem, numNewDevice, field) + print('update field %s to %s for proposal %s in run %s' % + (fieldName, fieldValue, proposal, runNo)) + self.qc.updateProposalAttribute(runNo, proposal, fieldName, fieldValue) + numNewDevice = numNewDevice + 1 + + parser = argparse.ArgumentParser() -parser.add_argument("-f","--fromExp", help="experiment to copy from") -parser.add_argument("-t","--toExp", help="experiment to copy to") -parser.add_argument("-r","--readExp", help="experiment to read CDS tag from") -parser.add_argument("-c","--copy_CDS", help="copy data from CDS tab", action='store_true') -parser.add_argument("-d","--add_device", help="name of device to be added") -parser.add_argument("-l","--list_devices", help="list device to be added", action='store_true') -parser.add_argument("-p","--print_device", help="print data for device") +parser.add_argument("-f", "--fromExp", help="experiment to copy from") +parser.add_argument("-t", "--toExp", help="experiment to copy to") +parser.add_argument("-r", "--readExp", help="experiment to read CDS tag from") +parser.add_argument("-c", "--copy_CDS", help="copy data from CDS tab", + action='store_true') +parser.add_argument("-d", "--add_device", help="name of device to be added") +parser.add_argument("-l", "--list_devices", help="list device to be added", + action='store_true') +parser.add_argument("-p", "--print_device", help="print data for device") parser.add_argument("--dev", help="connect to dev database", action='store_true') parser.add_argument("--experimentList", help="list of experiments", action='store_true') parser.add_argument("--propList", help="list of proposals", action='store_true') @@ -209,12 +218,13 @@ if args.list_devices: if args.print_device is not None: devd = json.loads(open("deviceDict.json").read()) if args.print_device not in devd.keys(): - print('device %s is not present in device list, avaiable devices are:'%args.print_device) + print('device %s is not present in device list, avaiable devices are:' + % args.print_device) print(devd.keys()) else: devDict = devd[args.print_device] for key 
in devDict: - print('add to section %s'%key) + print('add to section %s' % key) for item in devDict[key]: print(item) if args.toExp is None: @@ -223,48 +233,40 @@ if args.print_device is not None: kerb_url = 'https://pswww.slac.stanford.edu/ws-kerb/questionnaire/' kerb_url_dev = 'https://pswww-dev.slac.stanford.edu/ws-kerb/questionnaire/' -#qs = QuestionnaireTools() if args.dev: - qs = QuestionnaireTools(url=kerb_url_dev) + qs = QuestionnaireTools(url=kerb_url_dev) else: - qs = QuestionnaireTools(url=kerb_url) - + qs = QuestionnaireTools(url=kerb_url) if args.propList: print('please be aware that this script takes experiment, not proposal names') - print(qs._getProposalList(runs=[18,17,16,15], instrument=None)) - #print('---------------------------') - #print(qs._getProposalList(runs=[17], instrument=None)) - #print(qs._getExperimentList(runs=[17], instrument=None)) + print(qs._getProposalList(runs=[18, 17, 16, 15], instrument=None)) sys.exit() if args.experimentList: - print(qs._getExperimentList(runs=[18,17,16,15], instrument=None)) - #print('---------------------------') - #print(qs._getProposalList(runs=[17], instrument=None)) - #print(qs._getExperimentList(runs=[17], instrument=None)) + print(qs._getExperimentList(runs=[18, 17, 16, 15], instrument=None)) sys.exit() if args.readExp is not None: - p,r=qs._propRun_from_name(args.readExp) - cdsDict = qs._getProposalDetails(p,r) + p, r = qs._propRun_from_name(args.readExp) + cdsDict = qs._getProposalDetails(p, r) for entry in cdsDict.keys(): - print('%s: \t %s'%(entry, cdsDict[entry])) + print('%s: \t %s' % (entry, cdsDict[entry])) if args.toExp is None: sys.exit() if args.toExp is None: - toExp=input('we need an experiment to update the questionnaire for') + toExp = input('we need an experiment to update the questionnaire for') else: toExp = args.toExp if args.add_device: deviceLoc = args.add_device.split(',') device = deviceLoc[0] - if len(deviceLoc)>1: - location=deviceLoc[1].replace('\'','\"') - print('a location is given:',location) + if len(deviceLoc) > 1: + location = deviceLoc[1].replace('\'', '\"') + print('a location is given:', location) qs.addDeviceToProposal(toExp, devicename=device, location=location) else: qs.addDeviceToProposal(toExp, devicename=device) @@ -273,6 +275,6 @@ if args.copy_CDS: if args.fromExp is None: fromExp = input('experiment to copy CDS data from:') else: - fromExp=args.fromExp - + fromExp = args.fromExp + qs.copyProposal(fromExp, toExp) From 800e121f721c8144dd39ae05563f43ef5b8c2cd9 Mon Sep 17 00:00:00 2001 From: tangkong Date: Wed, 24 Apr 2024 11:29:47 -0700 Subject: [PATCH 4/7] STY: format detector_totals --- scripts/detector_totals.py | 49 ++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/scripts/detector_totals.py b/scripts/detector_totals.py index 2ed4aeaf..4830314a 100755 --- a/scripts/detector_totals.py +++ b/scripts/detector_totals.py @@ -3,16 +3,16 @@ Script for the detector group to pull totals out of the elog. We get DAQ run parameters for all experiments and count them up. 
""" -import sys -import logging import argparse +import logging +import sys from collections import OrderedDict -import requests -import pytz + import dateutil.parser as dateparser +import pytz +import requests from krtc import KerberosTicket - logger = logging.getLogger(__name__) krbheaders = KerberosTicket("HTTP@pswww.slac.stanford.edu").getAuthHeaders() tz = pytz.timezone('America/Los_Angeles') @@ -57,10 +57,11 @@ def getExperiments(run_period, after, before): except those whose first run is after the specified time """ resp = requests.get( - f"{lgbkprefix}/ws/experiments", - params={"categorize": "instrument_runperiod", - "sortby": "name"}, - headers=krbheaders).json() + f"{lgbkprefix}/ws/experiments", + params={"categorize": "instrument_runperiod", + "sortby": "name"}, + headers=krbheaders + ).json() exps = [] for k, v in resp["value"].items(): insexps = map(lambda x: x["_id"], v.get("Run " + str(run_period), [])) @@ -73,27 +74,29 @@ def first_run_exists(exp): return exp.get("first_run", {}).get("begin_time", None) def last_run_before_specified_after(exp): - return exp.get("last_run", {}).get("begin_time", None) \ - and dateparser.parse(exp["last_run"]["begin_time"])\ - .astimezone(tz) < after + return (exp.get("last_run", {}).get("begin_time", None) + and dateparser.parse(exp["last_run"]["begin_time"]) + .astimezone(tz) < after) def first_run_after_specified_before(exp): - return exp.get("first_run", {}).get("begin_time", None) \ - and dateparser.parse(exp["first_run"]["begin_time"])\ - .astimezone(tz) > before + return (exp.get("first_run", {}).get("begin_time", None) + and dateparser.parse(exp["first_run"]["begin_time"]).astimezone(tz) > before) sef = None if after and before: - sef = lambda x: last_run_exists(x) \ - and not last_run_before_specified_after(x) \ - and first_run_exists(x) \ - and not first_run_after_specified_before(x) + def sef(x): + return (last_run_exists(x) + and not last_run_before_specified_after(x) + and first_run_exists(x) + and not first_run_after_specified_before(x)) elif after: - sef = lambda x: last_run_exists(x) \ - and not last_run_before_specified_after(x) + def sef(x): + return (last_run_exists(x) + and not last_run_before_specified_after(x)) elif before: - sef = lambda x: first_run_exists(x) \ - and not first_run_after_specified_before(x) + def sef(x): + return (first_run_exists(x) + and not first_run_after_specified_before(x)) if sef: expsset = set(exps) From 04832a292729faa7516f432f04f26ba01dc6befc Mon Sep 17 00:00:00 2001 From: tangkong Date: Wed, 24 Apr 2024 15:55:14 -0700 Subject: [PATCH 5/7] STY/MNT: format python files and update py2 to py3 --- scripts/get_info.py | 187 ++++++++++-------- scripts/hdf5_to_gif.py | 1 + .../pfeiffer_serial_tools/address_change.py | 64 +++--- scripts/pfeiffer_serial_tools/control.py | 157 +++++++-------- scripts/pyps-deploy | 10 +- scripts/xpp_update_happi_line.py | 9 +- 6 files changed, 222 insertions(+), 206 deletions(-) diff --git a/scripts/get_info.py b/scripts/get_info.py index 6aeab369..d61aef93 100644 --- a/scripts/get_info.py +++ b/scripts/get_info.py @@ -1,8 +1,11 @@ import argparse -import socket +import logging import os +import socket import sys +import requests + parser = argparse.ArgumentParser() parser.add_argument("--run", help="get last run", action='store_true') parser.add_argument("--exp", help="get experiment name", action='store_true') @@ -12,91 +15,98 @@ parser.add_argument("--station", help="optional station for hutch with two daqs, e.g. 
cxi and mfx") parser.add_argument("--getHutch", help="get hutch (uppercase)", action='store_true') parser.add_argument("--gethutch", help="get hutch (lowercase)", action='store_true') -parser.add_argument("--getstation", help="get hutch station (for multiple daqs)", action='store_true') -parser.add_argument("--getbase", help="get base daq name (hutch_station if multiple daqs, otherwise hutch)", action='store_true') -parser.add_argument("--getinstrument", help="get instrument (HUTCH_station if multiple daqs, otherwise hutch)", action='store_true') +parser.add_argument("--getstation", help="get hutch station (for multiple daqs)", + action='store_true') +parser.add_argument("--getbase", + help="get base daq name (hutch_station if multiple daqs, otherwise hutch)", + action='store_true') +parser.add_argument("--getinstrument", + help="get instrument (HUTCH_station if multiple daqs, otherwise hutch)", + action='store_true') parser.add_argument("--getcnf", help="get cnf file name)", action='store_true') parser.add_argument("--files_for_run", help="get xtc files for run") parser.add_argument("--nfiles_for_run", help="get xtc files for run") parser.add_argument("--setExp", help="set experiment name") args = parser.parse_args() -hutches=['tmo','txi','rix','xpp','xcs','mfx','cxi','mec', 'ued', 'det', 'lfe','kfe','tst', 'las', 'hpl'] -foundHutch=False -hutch='' +hutches = ['tmo', 'txi', 'rix', 'xpp', 'xcs', 'mfx', 'cxi', 'mec', 'ued', 'det', + 'lfe', 'kfe', 'tst', 'las', 'hpl'] +foundHutch = False +hutch = '' -#populate hutch-specific subnets here: -hutch_subnets={'tmo': ['28','132','133','134','135'], - 'txi': ['29','136','137','138','139'], - 'rix': ['31','144','145','146','147'], - 'xpp': ['22','84','85','86','87'], - 'xcs': ['25','80','81','82','83'], - 'cxi': ['26','68','69','70','71'], - 'mfx': ['24','72','73','74','75'], - 'mec': ['27','76','77','78','79'], - 'ued': ['36'], - 'det': ['58', '59'], - 'lfe': ['88','89','90','91'], - 'kfe': ['92','93','94','95'], - 'tst': ['23','148','149','150','151'], - 'las': ['35','160','161','162','163'], - 'hpl': ['64']} +# populate hutch-specific subnets here: +hutch_subnets = {'tmo': ['28', '132', '133', '134', '135'], + 'txi': ['29', '136', '137', '138', '139'], + 'rix': ['31', '144', '145', '146', '147'], + 'xpp': ['22', '84', '85', '86', '87'], + 'xcs': ['25', '80', '81', '82', '83'], + 'cxi': ['26', '68', '69', '70', '71'], + 'mfx': ['24', '72', '73', '74', '75'], + 'mec': ['27', '76', '77', '78', '79'], + 'ued': ['36'], + 'det': ['58', '59'], + 'lfe': ['88', '89', '90', '91'], + 'kfe': ['92', '93', '94', '95'], + 'tst': ['23', '148', '149', '150', '151'], + 'las': ['35', '160', '161', '162', '163'], + 'hpl': ['64']} if args.hutch: - hutch=args.hutch + hutch = args.hutch if hutch in hutches: - hutch=hutch.upper() + hutch = hutch.upper() else: for ihutch in hutches: - if hutch.find(ihutch.upper())>=0: - hutch=ihutch.upper() - foundHutch=True + if hutch.find(ihutch.upper()) >= 0: + hutch = ihutch.upper() + foundHutch = True break if not foundHutch: print('unknown_hutch') sys.exit() else: - hostname=socket.gethostname() - ip=socket.gethostbyname(hostname) - subnet=ip.split('.')[2] - for ihutch in hutches: #use the IP address to match the host to a hutch by subnet + hostname = socket.gethostname() + ip = socket.gethostbyname(hostname) + subnet = ip.split('.')[2] + for ihutch in hutches: # use the IP address to match the host to a hutch by subnet if subnet in hutch_subnets.get(ihutch): - hutch=ihutch.upper() - foundHutch=True + hutch = ihutch.upper() + 
foundHutch = True break if not foundHutch: for ihutch in hutches: - if hostname.find(ihutch)>=0: - hutch=ihutch.upper() - foundHutch=True + if hostname.find(ihutch) >= 0: + hutch = ihutch.upper() + foundHutch = True break if not foundHutch: - if hostname.find('psusr')>=0: - if hostname.find('psusr13')>=0: - hutch='XPP' - elif hostname.find('psusr21')>=0: - hutch='XCS' - elif hostname.find('psusr22')>=0: - hutch='CXI' - elif hostname.find('psusr23')>=0: - hutch='MEC' - elif hostname.find('psusr24')>=0: - hutch='MFX' - if hutch!='': - foundHutch=True + if hostname.find('psusr') >= 0: + if hostname.find('psusr13') >= 0: + hutch = 'XPP' + elif hostname.find('psusr21') >= 0: + hutch = 'XCS' + elif hostname.find('psusr22') >= 0: + hutch = 'CXI' + elif hostname.find('psusr23') >= 0: + hutch = 'MEC' + elif hostname.find('psusr24') >= 0: + hutch = 'MFX' + if hutch != '': + foundHutch = True else: - #then check current path - path=os.getcwd() + # then check current path + path = os.getcwd() for ihutch in hutches: - if path.find(ihutch)>=0: - hutch=ihutch.upper() - foundHutch=True + if path.find(ihutch) >= 0: + hutch = ihutch.upper() + foundHutch = True break - if not foundHutch and path.find('xrt')+hostname.find('xrt')>=-1 or path.find('xtod')+hostname.find('xtod')>=-1: - hutch='LFE' #because we have so many names for the same subnet. - foundHutch=True + if (not foundHutch and path.find('xrt')+hostname.find('xrt') >= -1 + or path.find('xtod') + hostname.find('xtod') >= -1): + hutch = 'LFE' # because we have so many names for the same subnet. + foundHutch = True if not foundHutch: - #then ask.....outside of python + # then ask.....outside of python print('unknown_hutch') sys.exit() if args.getHutch: @@ -106,21 +116,21 @@ print(hutch.lower()) sys.exit() -#if hutch.lower() in ['mfx','cxi']: +# if hutch.lower() in ['mfx','cxi']: if hutch.lower() in ['cxi']: nstations = 2 if args.station is not None: station = int(args.station) else: - hostname=socket.gethostname() + hostname = socket.gethostname() if 'monitor' in hostname: station = 1 else: station = 0 - daq_base = '{:}_{:}'.format(hutch.lower(),station) - instrument = '{:}:{:}'.format(hutch.upper(),station) + daq_base = '{:}_{:}'.format(hutch.lower(), station) + instrument = '{:}:{:}'.format(hutch.upper(), station) elif hutch.lower() in ['rix']: - station=2 + station = 2 else: daq_base = hutch.lower() instrument = hutch.upper() @@ -128,10 +138,10 @@ if args.station: station = int(args.station) else: - station=0 + station = 0 -if hutch.lower()!='rix' and station >= nstations: - print("Invalid --station={:} keyword set for hutch {:}".format(hutch)) +if hutch.lower() != 'rix' and station >= nstations: + print("Invalid --station={:} keyword set for hutch {:}".format(station, hutch)) sys.exit() if args.getstation: @@ -147,31 +157,32 @@ print(daq_base+'.cnf') sys.exit() -import requests -import logging ws_url = "https://pswww.slac.stanford.edu/ws/lgbk" logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) if args.exp: - resp = requests.get(ws_url + "/lgbk/ws/activeexperiment_for_instrument_station", {"instrument_name": hutch, "station": station}) + resp = requests.get(ws_url + "/lgbk/ws/activeexperiment_for_instrument_station", + {"instrument_name": hutch, "station": station}) exp = resp.json().get("value", {}).get("name") print(exp) if args.run: try: - resp = requests.get(ws_url + "/lgbk/ws/activeexperiment_for_instrument_station", {"instrument_name": hutch, "station": station}) + resp = requests.get(ws_url + 
"/lgbk/ws/activeexperiment_for_instrument_station", + {"instrument_name": hutch, "station": station}) exp = resp.json().get("value", {}).get("name") - rundoc = requests.get(ws_url + "/lgbk/" + exp + "/ws/current_run").json()["value"] + rundoc = requests.get(ws_url + "/lgbk/" + exp + "/ws/current_run").json()["value"] if not rundoc: - #logger.error("Invalid response from server") + # logger.error("Invalid response from server") print('No runs taken yet') - else: + else: if args.ended: if rundoc.get('end_time', None) is not None: print(int(rundoc['num'])) else: - print(int(rundoc['num'] - 1)) # Really bogus way to determine this; but copying over from previous code. + # Really bogus way to determine this; but copying over from previous code. + print(int(rundoc['num'] - 1)) else: print(int(rundoc['num'])) if args.live: @@ -179,7 +190,7 @@ print('live') else: print('ended') - except: + except Exception: logger.exception("No runs?") print('No runs taken yet') @@ -190,25 +201,27 @@ run = int(args.nfiles_for_run) if args.setExp: - exp=args.setExp + exp = args.setExp else: - resp = requests.get(ws_url + "/lgbk/ws/activeexperiment_for_instrument_station", {"instrument_name": hutch, "station": station}) + resp = requests.get(ws_url + "/lgbk/ws/activeexperiment_for_instrument_station", + {"instrument_name": hutch, "station": station}) exp = resp.json().get("value", {}).get("name") - currundoc = requests.get(ws_url + "/lgbk/" + exp + "/ws/current_run").json()["value"] + currundoc = requests.get(ws_url + "/lgbk/" + exp + "/ws/current_run").json()["value"] runLast = int(currundoc['num']) if run > runLast: - print('run %s not taken yet, last run is %s'%(run,runLast)) + print('run %s not taken yet, last run is %s' % (run, runLast)) else: - file_list = requests.get(ws_url + "/lgbk/" + exp + "/ws/" + str(run) + "/files_for_live_mode").json()["value"] + file_list = requests.get(ws_url + "/lgbk/" + exp + "/ws/" + str(run) + + "/files_for_live_mode").json()["value"] if args.files_for_run: for tfile in file_list: - print('/reg/d/psdm/'+tfile) + print('/reg/d/psdm/' + tfile) elif args.nfiles_for_run: - #look at files, remove stream 80, only first chunk, return number. - nFiles=0 + # look at files, remove stream 80, only first chunk, return number. + nFiles = 0 for tfile in file_list: tfilename = '/reg/d/psdm/'+tfile - if tfilename.find('c00')>=0 and tfilename.find('-s8')<0: - nFiles=nFiles+1 - print('%d %d'%(nFiles,len(file_list))) + if tfilename.find('c00') >= 0 and tfilename.find('-s8') < 0: + nFiles = nFiles + 1 + print('%d %d' % (nFiles, len(file_list))) diff --git a/scripts/hdf5_to_gif.py b/scripts/hdf5_to_gif.py index ae5c3305..25c976d2 100644 --- a/scripts/hdf5_to_gif.py +++ b/scripts/hdf5_to_gif.py @@ -1,4 +1,5 @@ import argparse + import h5py from PIL import Image diff --git a/scripts/pfeiffer_serial_tools/address_change.py b/scripts/pfeiffer_serial_tools/address_change.py index 84dbcd74..e4afb696 100755 --- a/scripts/pfeiffer_serial_tools/address_change.py +++ b/scripts/pfeiffer_serial_tools/address_change.py @@ -1,19 +1,17 @@ #!/usr/bin/env python - """ 2013-09-26 Awallace This script is used to change the RS485 address of Pfeiffer turbo pumps using a digi port. See help for basic usage. 
+2024-04-24 roberttk +Updated for python 3.9 """ - - - - -import argparse, socket +import argparse +import socket parser = argparse.ArgumentParser() -parser.add_argument("-a", "--current_address", default='unknown', - required=True, +parser.add_argument("-a", "--current_address", default='unknown', + required=True, help="Required, current address number, type unknown and the script will attempt to find the pump and change it.", metavar="Current Address", dest='curr_addr') @@ -25,52 +23,53 @@ parser.add_argument('-v', action='store_true') - args = parser.parse_args() + def checksum(string): total = 0 for char in string: total += ord(char) - return format((total % 256),'03d') + return format((total % 256), '03d') + def change_addr(curr, new, connection): - message = format(int(curr), '03d')+'1079706000'+ format(int(new), '03d') + message = format(int(curr), '03d') + '1079706000' + format(int(new), '03d') cs = checksum(message) - + message = message + cs + '\r' if args.v: - print 'Sending '+message + print('Sending ' + message) connection.send(message) - + response = connection.recv(BUFFER_SIZE) if args.v: - print response + print(response) return response - + def find_pump(connection): - print "Searching for a pump, shouldn't take more than 5 minutes..." - for i in range(1,256): - message = format(i, '03d')+'0031202=?' + print("Searching for a pump, shouldn't take more than 5 minutes...") + for i in range(1, 256): + message = format(i, '03d')+'0031202=?' connection.send(message+checksum(message)+'\r') if args.v: - print i + print(i) try: - received = connection.recv(BUFFER_SIZE) + received = connection.recv(BUFFER_SIZE) if received: if args.v: - print "Found something" - print received + print("Found something") + print(received) break except socket.timeout: if args.v: - print 'Timed out' + print('Timed out') if i == 256: - print "Wasn't able to find a pump. Check your connections" + print("Wasn't able to find a pump. Check your connections") else: - print 'Found a pump @ '+str(i) - + print('Found a pump @ '+str(i)) + return str(i) @@ -80,21 +79,20 @@ def find_pump(connection): if args.v: - print args.curr_addr, args.new_addr + print(args.curr_addr, args.new_addr) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((TCP_IP, TCP_PORT)) s.settimeout(10) if args.curr_addr == 'unknown': - args.curr_addr = find_pump(s) - if args.v: - print 'Found a pump at address: '+args.curr_addr + args.curr_addr = find_pump(s) + if args.v: + print('Found a pump at address: '+args.curr_addr) -print 'Changing address from: '+args.curr_addr + ' to: ' + args.new_addr +print('Changing address from: '+args.curr_addr + ' to: ' + args.new_addr) change_addr(args.curr_addr, args.new_addr, s) s.close() - diff --git a/scripts/pfeiffer_serial_tools/control.py b/scripts/pfeiffer_serial_tools/control.py index fdb4f5bc..9b30f165 100755 --- a/scripts/pfeiffer_serial_tools/control.py +++ b/scripts/pfeiffer_serial_tools/control.py @@ -4,106 +4,109 @@ 2013-09-26 Awallace This script is used to change the RS485 address of Pfeiffer turbo pumps using a digi port. See help for basic usage. 
+2024-04-24 roberttk +converted to py3.9 """ - - - - -import argparse, socket, time +import argparse +import socket +import time TCP_IP = '172.21.37.29' TCP_PORT = 2105 BUFFER_SIZE = 1024 + def est_connect(): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((TCP_IP, TCP_PORT)) + s.settimeout(10) + return s + - - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.connect((TCP_IP, TCP_PORT)) - s.settimeout(10) - - return s - def close_conn(connection): - connection.close() - return + connection.close() + return + def checksum(string): total = 0 for char in string: total += ord(char) - return format((total % 256),'03d') - + return format((total % 256), '03d') + + def c_pmp(control_code, control_parameter, connection, address='1'): - message = format(int(address), '03d')+'10'+control_code + format(len(control_parameter), '02d')+ control_parameter - print 'Sending '+message - connection.send(message + checksum(message) + '\r') - try: - response = connection.recv(BUFFER_SIZE) - except socket.timeout: - print 'Timeout' - return str(response) - + message = (format(int(address), '03d') + '10' + control_code + + format(len(control_parameter), '02d') + control_parameter) + print('Sending ' + message) + connection.send(message + checksum(message) + '\r') + try: + response = connection.recv(BUFFER_SIZE) + except socket.timeout: + print('Timeout') + return str(response) + + def q_pmp(query_code, connection, address='1'): - - message = format(int(address), '03d')+'00'+str(query_code)+'02'+'=?' - - cs = checksum(message) - - message = message + cs + '\r' - connection.send(message + checksum(message) + '\r') - try: - response = connection.recv(BUFFER_SIZE) - except socket.timeout: - print 'Timeout' - return response + message = format(int(address), '03d') + '00' + str(query_code) + '02' + '=?' 
+ cs = checksum(message) + + message = message + cs + '\r' + connection.send(message + checksum(message) + '\r') + try: + response = connection.recv(BUFFER_SIZE) + except socket.timeout: + print('Timeout') + return response + def run_pmp(connection, address='1'): - c_pmp('010', '111111', connection, address) + c_pmp('010', '111111', connection, address) + def stop_pmp(connection, address='1'): - c_pmp('010', '000000', connection, address) + c_pmp('010', '000000', connection, address) -def pump_spd(connection, address='1'): - s = q_pmp(309, connection, address) - print s[10:16] + ' Hz' -def pump_pwr(connection, address='1'): - s = q_pmp(316, connection, address) - print s[10:16] + ' W' +def pump_spd(connection, address='1'): + s = q_pmp(309, connection, address) + print(s[10:16] + ' Hz') -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("-a", "--current_address", default='unknown', - required=True, - help="Address of the pump to control/ query", - metavar="Current Address", - dest='curr_addr') - parser.add_argument('-v', action='store_true') - parser.add_argument('-r', action='store_true') - parser.add_argument('-s', action='store_true') - parser.add_argument('-spd', action='store_true') - parser.add_argument('-pwr', action='store_true') - parser.add_argument('-mon', action='store_true') - args = parser.parse_args() - - conn1 = est_connect() - - if args.s: - stop_pmp(conn1, args.curr_addr) - elif args.spd: - pump_spd(conn1, args.curr_addr) - elif args.pwr: - pump_pwr(conn1, args.curr_addr) - elif args.mon: - while True: - pump_spd(conn1, args.curr_addr) - pump_pwr(conn1, args.curr_addr) - time.sleep(1) - elif args.r: - run_pmp(conn1, args.curr_addr) - - close_conn(conn1) +def pump_pwr(connection, address='1'): + s = q_pmp(316, connection, address) + print(s[10:16] + ' W') +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("-a", "--current_address", default='unknown', + required=True, + help="Address of the pump to control/ query", + metavar="Current Address", + dest='curr_addr') + parser.add_argument('-v', action='store_true') + parser.add_argument('-r', action='store_true') + parser.add_argument('-s', action='store_true') + parser.add_argument('-spd', action='store_true') + parser.add_argument('-pwr', action='store_true') + parser.add_argument('-mon', action='store_true') + args = parser.parse_args() + + conn1 = est_connect() + + if args.s: + stop_pmp(conn1, args.curr_addr) + elif args.spd: + pump_spd(conn1, args.curr_addr) + elif args.pwr: + pump_pwr(conn1, args.curr_addr) + elif args.mon: + while True: + pump_spd(conn1, args.curr_addr) + pump_pwr(conn1, args.curr_addr) + time.sleep(1) + elif args.r: + run_pmp(conn1, args.curr_addr) + + close_conn(conn1) diff --git a/scripts/pyps-deploy b/scripts/pyps-deploy index 483d5f51..c6cf2d18 100755 --- a/scripts/pyps-deploy +++ b/scripts/pyps-deploy @@ -9,17 +9,16 @@ and repoint the symbolic link at to the new release folder. 
""" import argparse -import subprocess -import stat import os +import subprocess CONDA_BASE = '/reg/g/pcds/pyps/conda/py36' APPS_BASE = '/reg/g/pcds/pyps/apps' # Some basic py2/py3 compat for this standalone script try: - input = raw_input -except: + input = raw_input # NOQA +except NameError: pass @@ -60,7 +59,7 @@ def main(): # Check the environment conda_setup = CONDA_BASE + '/etc/profile.d/conda.sh' if not os.path.exists(conda_setup): - print('No conda activate script at {}, aborting'.format(conda_source)) + print('No conda activate script at {}, aborting'.format(conda_setup)) return conda_env = CONDA_BASE + '/envs/' + args.conda if not os.path.exists(conda_env): @@ -116,5 +115,6 @@ def main(): print('{} does not exist, aborting'.format(APPS_BASE)) return + if __name__ == '__main__': main() diff --git a/scripts/xpp_update_happi_line.py b/scripts/xpp_update_happi_line.py index 58970db6..719e2f11 100644 --- a/scripts/xpp_update_happi_line.py +++ b/scripts/xpp_update_happi_line.py @@ -1,6 +1,6 @@ #!/usr/bin/env python """ -Updates items in the happi database to reflect the which line XPP is +Updates items in the happi database to reflect the which line XPP is using (mono line, pink line). Assumes pcds-conda, and grabs the currently active happi config. @@ -37,7 +37,7 @@ def main(): args = parser.parse_args() max_z = args.max_z min_z = args.min_z - + pink_active = 0 mono_active = 0 if args.pink: @@ -47,7 +47,7 @@ def main(): else: pink_active = EpicsSignalRO('XPP:INS:POS:01:IN_DI').get() mono_active = EpicsSignalRO('XPP:INS:POS:01:OUT_DO').get() - + # get current working config and happi client client = happi.client.Client.from_config() # grab all possibly relevant results @@ -95,8 +95,9 @@ def main(): print(' dry run, value not saved') continue res.item.save() - + print('Mode change completed') + if __name__ == '__main__': main() From 46b7662c5f5502f3c04e299fd9dc3f531662e14a Mon Sep 17 00:00:00 2001 From: tangkong Date: Wed, 24 Apr 2024 16:21:50 -0700 Subject: [PATCH 6/7] MNT/STY: upgrade and style a few python files I missed --- scripts/BLmotors | 3 ++- scripts/elog_par_post | 40 ++++++++++++++++++++++----------------- scripts/epicsArchChecker | 7 ++++--- scripts/epix_gains | 41 +++++++++++++++++++++------------------- scripts/evrStatus | 11 ++--------- scripts/image_saver | 17 +++++++++-------- 6 files changed, 62 insertions(+), 57 deletions(-) diff --git a/scripts/BLmotors b/scripts/BLmotors index 264f6109..38c3ec27 100755 --- a/scripts/BLmotors +++ b/scripts/BLmotors @@ -13,8 +13,9 @@ The script will generate a report for: 3. If the current configuration matches or not with the appropriate values. 
""" -import os import argparse +import os + import ophyd.signal from pmgr import pmgrAPI from prettytable import PrettyTable diff --git a/scripts/elog_par_post b/scripts/elog_par_post index 15a918ba..a58a843c 100755 --- a/scripts/elog_par_post +++ b/scripts/elog_par_post @@ -1,45 +1,50 @@ #!/usr/bin/env python -import requests -from requests.auth import HTTPBasicAuth import argparse import socket +import requests +from requests.auth import HTTPBasicAuth + + def readRunTable(fname): - runTableData={} + runTableData = {} with open(fname) as reader: lines = reader.readlines() for line in lines: try: print(line) - runTableData[line.split()[0]]=line.split()[1] - except: + runTableData[line.split()[0]] = line.split()[1] + except Exception: pass return runTableData + def postRunTableDict(runtable_data, experiment, run): - args_url="https://pswww.slac.stanford.edu/ws-auth/lgbk/" + args_url = "https://pswww.slac.stanford.edu/ws-auth/lgbk/" ws_url = args_url + "/run_control/{0}/ws/add_run_params".format(experiment) - print('URL:',ws_url) - user=experiment[:3]+'opr' - elogPostFile='/cds/home/opr/%s/forElogPost.txt'%user - hostname=socket.gethostname() - if hostname.find('sdf')>=0: - elogPostFile='/sdf/group/lcls/ds/tools/forElogPost.txt' - with open(elogPostFile,'r') as reader: + print('URL:', ws_url) + user = experiment[:3] + 'opr' + elogPostFile = '/cds/home/opr/%s/forElogPost.txt' % user + hostname = socket.gethostname() + if hostname.find('sdf') >= 0: + elogPostFile = '/sdf/group/lcls/ds/tools/forElogPost.txt' + with open(elogPostFile, 'r') as reader: answer = reader.readline() - r = requests.post(ws_url, params={"run_num": run}, json=runtable_data, \ - auth=HTTPBasicAuth(experiment[:3]+'opr', answer[:-1])) + r = requests.post(ws_url, params={"run_num": run}, json=runtable_data, + auth=HTTPBasicAuth(experiment[:3] + 'opr', answer[:-1])) print(r) + def postRunTable(fname, experiment, run): print(fname, experiment, run) - if fname=='pedestal': - dataDict={'pedestal':'done'} + if fname == 'pedestal': + dataDict = {'pedestal': 'done'} else: dataDict = readRunTable(fname) print(dataDict) postRunTableDict(dataDict, experiment, run) + def main(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-r', '--run', help='run number', @@ -51,5 +56,6 @@ def main(): postRunTable(args.file, args.experiment, args.run) + if __name__ == '__main__': main() diff --git a/scripts/epicsArchChecker b/scripts/epicsArchChecker index f1823845..c00a0154 100755 --- a/scripts/epicsArchChecker +++ b/scripts/epicsArchChecker @@ -9,8 +9,9 @@ of reports: 3. PVs no connected. 4. Files that do not exist. 
""" -import os import argparse +import os + import ophyd.signal from prettytable import PrettyTable @@ -119,7 +120,7 @@ def create_Lists(entries): myPVs.append(entry[1]) myFiles.append(entry[2]) lineNumbers.append('№ ' + str(entry[3])) - return(myKeys, myPVs, myFiles, lineNumbers) + return (myKeys, myPVs, myFiles, lineNumbers) def find_index(myKeys, myPVs, myFiles): @@ -146,7 +147,7 @@ def find_index(myKeys, myPVs, myFiles): if dmyPVs[dpv] == myPVs[pv]: indPVs.append(pv) - return(indKeys, indPVs) + return (indKeys, indPVs) def report_duplicates(indKeys, indPVs, myKeys, myPVs, myFiles, numLines): diff --git a/scripts/epix_gains b/scripts/epix_gains index 55ee8f3f..f245320d 100755 --- a/scripts/epix_gains +++ b/scripts/epix_gains @@ -1,29 +1,30 @@ #!/usr/bin/env python -import psana -from Detector.UtilsEpix10ka import find_gain_mode -from Detector.UtilsEpix10ka import info_pixel_gain_mode_statistics_for_raw -import sys import argparse +import sys + +import psana +from Detector.UtilsEpix10ka import (find_gain_mode, + info_pixel_gain_mode_statistics_for_raw) parser = argparse.ArgumentParser() parser.add_argument('--run', help='run', type=str) parser.add_argument('--experiment', help='experiment name', type=str) args = parser.parse_args() -exprun = 'exp=%s:run=%s'%(args.experiment,args.run) +exprun = 'exp=%s:run=%s' % (args.experiment, args.run) ds = psana.DataSource(exprun+':smd') aliases = [] for dn in psana.DetNames(): - if dn[1]!='': + if dn[1] != '': alias = dn[1] else: alias = dn[0] - if alias.find('10k')>0: + if alias.find('10k') > 0: aliases.append(alias) -if len(alias)==0: +if len(alias) == 0: print('did not find an epix10k, quit.') sys.exit() @@ -33,29 +34,31 @@ for ialias, alias in enumerate(aliases): epixs.append(psana.Detector(alias)) for run in ds.runs(): - print 'run loop' - for nstep,step in enumerate(run.steps()): + print('run loop') + for nstep, step in enumerate(run.steps()): for epix in epixs: - print '*** config gain mode: %s \t'%epix.name, find_gain_mode(det=epix) + print('*** config gain mode: %s \t' % epix.name, find_gain_mode(det=epix)) pvList = cd().pvLabels() for pv in pvList: - print 'Step',nstep,'name/value:',pv.name(),pv.value() + print('Step', nstep, 'name/value:', pv.name(), pv.value()) found = False nbad = 0 - raws=[] + raws = [] for epix in epixs: raws.append(None) - for nevt,evt in enumerate(step.events()): + for nevt, evt in enumerate(step.events()): if not found: - for iepix,epix in enumerate(epixs): + for iepix, epix in enumerate(epixs): found = True if epix.raw(evt) is None: nbad += 1 - raws[iepix]=None + raws[iepix] = None found = False else: - raws[iepix]=epix.raw(evt) + raws[iepix] = epix.raw(evt) if found: - for iepix,epix in enumerate(epixs): - print '--- event gain mode for %s \t'%epix.name,find_gain_mode(det=epix,data=raws[iepix]),info_pixel_gain_mode_statistics_for_raw(epix, raws[iepix]) + for iepix, epix in enumerate(epixs): + print('--- event gain mode for %s \t' % epix.name, + find_gain_mode(det=epix, data=raws[iepix]), + info_pixel_gain_mode_statistics_for_raw(epix, raws[iepix])) pass diff --git a/scripts/evrStatus b/scripts/evrStatus index 0846569b..8ebe1d16 100755 --- a/scripts/evrStatus +++ b/scripts/evrStatus @@ -6,15 +6,8 @@ from sys import stdin from pydm.widgets import PyDMPushButton from pydm.widgets.label import PyDMLabel from qtpy import QtCore -from qtpy.QtWidgets import ( - QApplication, - QGridLayout, - QHBoxLayout, - QHeaderView, - QLabel, - QTableWidget, - QWidget, -) +from qtpy.QtWidgets import (QApplication, QGridLayout, 
QHBoxLayout, + QHeaderView, QLabel, QTableWidget, QWidget) class Expert(QWidget): diff --git a/scripts/image_saver b/scripts/image_saver index edafb8ae..a80720f1 100755 --- a/scripts/image_saver +++ b/scripts/image_saver @@ -1,11 +1,12 @@ #!/usr/bin/env python -from argparse import ArgumentParser -from subprocess import getoutput -from time import time, sleep import os -from hutch_python.cam_load import interpret_lines, build_cam +from argparse import ArgumentParser from functools import partial +from subprocess import getoutput +from time import sleep, time + from hdf5_to_gif import convert +from hutch_python.cam_load import build_cam, interpret_lines def take_images(camera, num_images, path, filename, output_gif=""): @@ -161,11 +162,11 @@ def button_gui(cameras, num_images, path, filenames, hutch, filename_prefix, Path to which hdf5 images taken will be converted to gifs and stored in. If empty, no gifs will be made. """ - from qtpy.QtWidgets import (QApplication, QWidget, QGridLayout, - QPushButton, QLineEdit, QLabel, QHBoxLayout, - QInputDialog) - from qtpy.QtGui import QIntValidator from qtpy.QtCore import Qt + from qtpy.QtGui import QIntValidator + from qtpy.QtWidgets import (QApplication, QGridLayout, QHBoxLayout, + QInputDialog, QLabel, QLineEdit, QPushButton, + QWidget) app = QApplication([]) window = QWidget() window.setWindowTitle("Imager") From 548dceadd6753e4d5cc9606daadd7ecede6b26da Mon Sep 17 00:00:00 2001 From: tangkong Date: Thu, 25 Apr 2024 09:43:17 -0700 Subject: [PATCH 7/7] TST: run pre-commit checks on modified files --- .github/workflows/standard.yml | 42 ++++++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/.github/workflows/standard.yml b/.github/workflows/standard.yml index 8f1f47aa..e12964de 100644 --- a/.github/workflows/standard.yml +++ b/.github/workflows/standard.yml @@ -8,8 +8,46 @@ on: - created jobs: + get_changed_files: + name: "get_changed_files" + runs-on: ubuntu-20.04 + outputs: + PROJECT_FILES: ${{ steps.changed_files.outputs.PROJECT_FILES }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: 'recursive' + + - name: Find all changed files + id: changed_files + if: ${{ github.event_name == 'pull_request' }} + run: | + # Fancy method of saying: + # * What files changed from this PR (the current checked out HEAD) + # * To the base that we're merging into (e.g., origin/master) + # Note that ACMRT = added/copied/modified/renamed/type change + # See more in `man git-diff-tree` under `--diff-filter` + git diff-tree --no-commit-id --name-only --diff-filter=ACM -r -z origin/${{ github.base_ref }} HEAD \ + | tee "$HOME/project_files.txt" + + + if [ ! -s "$HOME/project_files.txt" ]; then + echo "No source code files changed in this PR. Checking the entire repository." + find "." -print0 -type f \ + > "$HOME/project_files.txt" + fi + + # replace nulls with spaces + sed -i 's/\x0/ /g' $HOME/project_files.txt + + # set output + echo "PROJECT_FILES=$(<$HOME/project_files.txt)" >> "$GITHUB_OUTPUT" + pre-commit: - name: "pre-commit checks" + needs: + - get_changed_files + name: 'pre-commit checks' uses: pcdshub/pcds-ci-helpers/.github/workflows/pre-commit.yml@master with: - args: "--all-files" + args: "--files ${{ needs.get_changed_files.outputs.PROJECT_FILES }}"