diff --git a/pswi/03_create.sh b/pswi/03_create.sh index 9915adfd2b..e339903577 100644 --- a/pswi/03_create.sh +++ b/pswi/03_create.sh @@ -293,17 +293,17 @@ echo "Showing EXPORT JCL how it looks before the change" #else echo "Changing jobcard and adding SYSAFF" sed "s|//IZUD01EX JOB (ACCOUNT),'NAME'|$JOBST1\n$JOBST2|g" EXPORT >EXPJCL0 -sed "s|//.*gimzipInputFile.*,|EXPJCL1 -sed "s|// FILEDATA=TEXT| archid=\"ZOS003.ZWE.PSWI.AZWE003.ZFS\"/>|g" EXPJCL1 >EXPJCL2 -sed "s|// DD \*||g" EXPJCL2 >EXPJCL +#sed "s|//.*gimzipInputFile.*,|EXPJCL1 +#sed "s|// FILEDATA=TEXT| archid=\"ZOS003.ZWE.PSWI.AZWE003.ZFS\"/>|g" EXPJCL1 >EXPJCL2 +#sed "s|// DD \*||g" EXPJCL2 >EXPJCL # sed "s|ZOS003.ZWE.PSWI.|ZWE.PSWI.|g" EXPJCL3 >EXPJCL #fi -sh scripts/submit_jcl.sh "$(cat EXPJCL)" +sh scripts/submit_jcl.sh "$(cat EXPJCL0)" if [ $? -gt 0 ]; then exit -1; fi -rm ./EXPJCL +rm ./EXPJCL0 rm ./EXPORT # Pax the directory @@ -333,5 +333,4 @@ get ${SWI_NAME}.pax.Z EOF cd ../pswi -#TODO: redirect everything to $log/x ? #TODO: Check why there is name in mountpoints responses and it still doesn't show (although the mount points are different so it's good it is not doing anything) diff --git a/pswi/04_create_cleanup.sh b/pswi/04_create_cleanup.sh index f682379f3a..5039141893 100644 --- a/pswi/04_create_cleanup.sh +++ b/pswi/04_create_cleanup.sh @@ -100,7 +100,6 @@ sh scripts/submit_jcl.sh "$(cat JCL)" # Not checking results so the script doesn't fail rm JCL - # Unmount and delete echo "Unmounting and deleting zFS ${WORK_ZFS}." diff --git a/pswi/05_test.sh b/pswi/05_test.sh index 94afd818a2..46a67a57a4 100644 --- a/pswi/05_test.sh +++ b/pswi/05_test.sh @@ -68,7 +68,8 @@ sh scripts/tmp_mounts.sh "${WORK_ZFS}" "${WORK_MOUNT}" if [ $? -gt 0 ]; then exit -1; fi # Run the deployment test -echo " Running the deployment test for z/OSMF version 2.3" +echo " Running the deployment test for z/OSMF version ${ZOSMF_V}" pip install requests + python scripts/deploy_test_2_3.py diff --git a/pswi/06_test_cleanup.sh b/pswi/06_test_cleanup.sh index a45144a454..6f8ee19f06 100644 --- a/pswi/06_test_cleanup.sh +++ b/pswi/06_test_cleanup.sh @@ -15,7 +15,7 @@ echo "Portable Software Instance :" $PSWI echo "Software instance name :" $DEPLOY_NAME echo "Temporary zFS :" $TMP_ZFS echo "Work zFS :" $WORK_ZFS # For z/OSMF v2.3 -echo "Directory for logs :" $LOGDIR +echo "Directory for logs :" $LOG_DIR echo "ACCOUNT :" $ACCOUNT echo "SYSAFF :" $SYSAFF echo "z/OSMF version :" $ZOSMF_V @@ -25,35 +25,6 @@ DELETE_PSWI_URL="${BASE_URL}/zosmf/swmgmt/pswi/${ZOSMF_SYSTEM}/${PSWI}" WORKFLOW_LIST_URL="${BASE_URL}/zosmf/workflow/rest/1.0/workflows?owner=${ZOSMF_USER}&workflowName=${WORKFLOW_NAME}.*" DELETE_DEPL_SWI_URL="${BASE_URL}/zosmf/swmgmt/swi/${ZOSMF_SYSTEM}/${DEPLOY_NAME}" -check_response() { - RESP=$1 - RESPCODE=$2 - - REASON=$(echo $RESP | grep -o '"reason":') - EMPTY=$(echo $RESP | grep -o '\[\]') - MSG=$(echo $RESP | grep -o '"messageText":') - if [ -n "$REASON" ] || [ -n "$MSG" ]; then - echo "Info: Logging to file ${LOG_FILE}." - echo "$RESP" >>$LOG_FILE - fi - if [ -n "$EMPTY" ]; then - echo "Info: Logging to file ${LOG_FILE}." - echo "$RESP" >>$LOG_FILE - fi - if [ $RESPCODE -ne 0 ]; then - echo "Info: Logging to file ${LOG_FILE}." - if [ -n "$RESP" ]; then - echo "$RESP" >>$LOG_FILE - else - echo "REST API call wasn't successful." >>$LOG_FILE - fi - else - echo "REST API call was successful." 
- fi - - return -} - # Create a log file touch $LOG_FILE @@ -61,16 +32,7 @@ touch $LOG_FILE echo "Invoking REST API to delete the Software Instance created by deployment." RESP=$(curl -s $DELETE_DEPL_SWI_URL -k -X "DELETE" -H "Content-Type: application/json" -H "X-CSRF-ZOSMF-HEADER: A" --user $ZOSMF_USER:$ZOSMF_PASS) -check_response "${RESP}" $? - -if [ "$ZOSMF_V" = "2.4" ]; then - - # Delete the Portable Software Instance - echo "Invoking REST API to delete the portable software instance." - - RESP=$(curl -s $DELETE_PSWI_URL -k -X "DELETE" -H "Content-Type: application/json" -H "X-CSRF-ZOSMF-HEADER: A" --user $ZOSMF_USER:$ZOSMF_PASS) - check_response "${RESP}" $? -fi +sh scripts/check_response.sh "${RESP}" $? # Unmount and delete echo "Unmounting and deleting zFS ${TMP_ZFS}." @@ -94,29 +56,7 @@ sh scripts/submit_jcl.sh "$(cat JCL)" # Not checking results so the script doesn't fail rm JCL -if [ "$ZOSMF_V" = "2.3" ]; then - # Unmount and delete - echo "Unmounting and deleting zFS ${WORK_ZFS}." - - echo ${JOBST1} >JCL - echo ${JOBST2} >>JCL - echo "//UNMNTZFS EXEC PGM=IKJEFT01,REGION=4096K,DYNAMNBR=50" >>JCL - echo "//SYSTSPRT DD SYSOUT=*" >>JCL - echo "//SYSTSOUT DD SYSOUT=*" >>JCL - echo "//SYSTSIN DD * " >>JCL - echo "UNMOUNT FILESYSTEM('${WORK_ZFS}') + " >>JCL - echo "IMMEDIATE" >>JCL - echo "/*" >>JCL - echo "//DELTZFST EXEC PGM=IDCAMS" >>JCL - echo "//SYSPRINT DD SYSOUT=*" >>JCL - echo "//SYSIN DD *" >>JCL - echo " DELETE ${WORK_ZFS}" >>JCL - echo "/*" >>JCL - - sh scripts/submit_jcl.sh "$(cat JCL)" - # Not checking results so the script doesn't fail - rm JCL -fi + # Unmount and delete echo "Unmounting and deleting zFS ${TEST_HLQ}.ZFS." @@ -127,6 +67,13 @@ echo "//UNMNTZFS EXEC PGM=IKJEFT01,REGION=4096K,DYNAMNBR=50" >>JCL echo "//SYSTSPRT DD SYSOUT=*" >>JCL echo "//SYSTSOUT DD SYSOUT=*" >>JCL echo "//SYSTSIN DD * " >>JCL +echo "UNMOUNT FILESYSTEM('${TEST_HLQ}.ZFS.#') + " >>JCL +echo "IMMEDIATE" >>JCL +echo "/*" >>JCL +echo "//UNMNTZF2 EXEC PGM=IKJEFT01,REGION=4096K,DYNAMNBR=50" >>JCL +echo "//SYSTSPRT DD SYSOUT=*" >>JCL +echo "//SYSTSOUT DD SYSOUT=*" >>JCL +echo "//SYSTSIN DD * " >>JCL echo "UNMOUNT FILESYSTEM('${TEST_HLQ}.ZFS') + " >>JCL echo "IMMEDIATE" >>JCL echo "/*" >>JCL @@ -134,6 +81,7 @@ echo "//DELTZFST EXEC PGM=IDCAMS" >>JCL echo "//SYSPRINT DD SYSOUT=*" >>JCL echo "//SYSIN DD *" >>JCL echo " DELETE ${TEST_HLQ}.ZFS" >>JCL +echo " DELETE ${TEST_HLQ}.ZFS.#" >>JCL echo "/*" >>JCL sh scripts/submit_jcl.sh "$(cat JCL)" @@ -155,21 +103,25 @@ echo "/*" >>JCL sh scripts/submit_jcl.sh "$(cat JCL)" rm JCL -if [ "$ZOSMF_V" = "2.4" ]; then - # Delete Post-deployment workflow in z/OSMF - echo "Invoking REST API to delete Post-deployment workflows." - - # Get workflowKey for Post-deployment workflow owned by user - RESP=$(curl -s $WORKFLOW_LIST_URL -k -X "GET" -H "Content-Type: application/json" -H "X-CSRF-ZOSMF-HEADER: A" --user $ZOSMF_USER:$ZOSMF_PASS) - check_response "${RESP}" $? - WFKEYS=$(echo $RESP | sed 's/},/},\n/g' | grep -oP '"workflowKey":".*"' | cut -f4 -d\") - IFS=$'\n' - for KEY in $WFKEYS; do +# Unmount and delete +echo "Unmounting and deleting zFS ${WORK_ZFS}." - echo "Deleting a workflow." - RESP=$(curl -s ${BASE_URL}/zosmf/workflow/rest/1.0/workflows/${KEY} -k -X "DELETE" -H "Content-Type: application/json" -H "X-CSRF-ZOSMF-HEADER: A" --user $ZOSMF_USER:$ZOSMF_PASS) - check_response "${RESP}" $? 
+echo ${JOBST1} >JCL +echo ${JOBST2} >>JCL +echo "//UNMNTZFS EXEC PGM=IKJEFT01,REGION=4096K,DYNAMNBR=50" >>JCL +echo "//SYSTSPRT DD SYSOUT=*" >>JCL +echo "//SYSTSOUT DD SYSOUT=*" >>JCL +echo "//SYSTSIN DD * " >>JCL +echo "UNMOUNT FILESYSTEM('${WORK_ZFS}') + " >>JCL +echo "IMMEDIATE" >>JCL +echo "/*" >>JCL +echo "//DELTZFST EXEC PGM=IDCAMS" >>JCL +echo "//SYSPRINT DD SYSOUT=*" >>JCL +echo "//SYSIN DD *" >>JCL +echo " DELETE ${WORK_ZFS}" >>JCL +echo "/*" >>JCL - done -fi +sh scripts/submit_jcl.sh "$(cat JCL)" +# Not checking results so the script doesn't fail +rm JCL diff --git a/pswi/scripts/deploy_test_2_3.py b/pswi/scripts/deploy_test_2_3.py index 7163577ce5..aa51b4976d 100644 --- a/pswi/scripts/deploy_test_2_3.py +++ b/pswi/scripts/deploy_test_2_3.py @@ -4,48 +4,55 @@ import os import glob + class Deploy_test: - - def __init__(self, url, user, password, system, hlq, jobst1, jobst2, volume, tzone, dzone, new_mountpoint, pswi_path, work_mount, swi_name): - - izudurl = "{0}/zosmf/restfiles/fs{1}/IZUD00DF.json".format(url, pswi_path) - self.headers = {'X-CSRF-ZOSMF-HEADER': ''} - resp = requests.get(izudurl, headers=self.headers, auth=(user, password), verify=False) - - izud = json.loads(resp.text) - # Set variables - self.url = url - self.user = user - self.password = password - self.system = system - self.hlq = hlq.upper() - self.jobst1 = jobst1 + "\n" - self.jobst2 = jobst2 + "\n" - self.volume = volume.upper() - self.pswi_path = pswi_path - self.tzone = tzone.upper() - self.dzone = dzone.upper() - self.new_mountp = new_mountpoint - - self.definition = izud["izud.pswi.descriptor"] - self.datasets = self.definition["datasets"] - self.swi_name = swi_name - - for dataset in self.datasets: - if dataset["zonedddefs"] is not None: - for zonedddef in dataset["zonedddefs"]: - for dddef in zonedddef["dddefs"]: - if dddef["path"] is not None: - self.no_dddef = dddef["dddef"] - self.old_mountp = dataset["mountpoint"] - - for zone in self.definition["zones"]: - if zone["type"] == "TARGET": - self.target = zone["name"] - elif zone["type"] == "DLIB": - self.dlib = zone["name"] - - self.job1 = """//GIMUNZIP EXEC PGM=GIMUNZIP,PARM='HASH=NO' + + def __init__(self, url, user, password, system, hlq, jobst1, jobst2, volume, tzone, dzone, new_mountpoint, pswi_path, + work_mount, swi_name): + + izudurl = "{0}/zosmf/restfiles/fs{1}/IZUD00DF.json".format(url, pswi_path) + self.headers = {'X-CSRF-ZOSMF-HEADER': ''} + resp = requests.get(izudurl, headers=self.headers, auth=(user, password), verify=False) + + izud = json.loads(resp.text) + # Set variables + self.url = url + self.user = user + self.password = password + self.system = system + self.hlq = hlq.upper() + self.jobst1 = jobst1 + "\n" + self.jobst2 = jobst2 + "\n" + self.volume = volume.upper() + self.pswi_path = pswi_path + self.tzone = tzone.upper() + self.dzone = dzone.upper() + self.new_mountp = new_mountpoint + + self.definition = izud["izud.pswi.descriptor"] + self.datasets = self.definition["datasets"] + self.work_mount = work_mount + self.swi_name = swi_name + self.version = int(self.definition["version"]) + for dataset in self.datasets: + if dataset["zonedddefs"] is not None: + for zonedddef in dataset["zonedddefs"]: + for dddef in zonedddef["dddefs"]: + if dddef["path"] is not None: + self.no_dddef = dddef["dddef"] + self.old_mountp = dataset["mountpoint"] + self.tracks = int(dataset["tracks"]) + self.secondary = int(dataset["secondary"]) + self.new_zfs = self.new_name(dataset["dsname"]) + self.zfs_archid = dataset["archid"] + + for zone in 
self.definition["zones"]: + if zone["type"] == "TARGET": + self.target = zone["name"] + elif zone["type"] == "DLIB": + self.dlib = zone["name"] + + self.job1 = """//GIMUNZIP EXEC PGM=GIMUNZIP,PARM='HASH=NO' //SYSUT3 DD UNIT=SYSALLDA,SPACE=(CYL,(1,1)) //SYSUT4 DD UNIT=SYSALLDA,SPACE=(CYL,(1,1)) //SMPWKDIR DD PATH='{0}/' @@ -56,15 +63,15 @@ def __init__(self, url, user, password, system, hlq, jobst1, jobst2, volume, tzo //SYSIN DD * -""".format(work_mount,self.pswi_path,self.volume) - self.job1_end = """ +""".format(self.work_mount, self.pswi_path, self.volume) + self.job1_end = """ /* """ - self.job2 = """//RENAME1 EXEC PGM=IDCAMS,REGION=0M + self.job2 = """//RENAME1 EXEC PGM=IDCAMS,REGION=0M //SYSPRINT DD SYSOUT=* //SYSIN DD * """ - self.job3 = """//UPDZONES EXEC PGM=GIMSMP,REGION=0M, + self.job3 = """//UPDZONES EXEC PGM=GIMSMP,REGION=0M, // PARM='CSI={0}' //SMPLOG DD SYSOUT=* //SMPLOGA DD SYSOUT=* @@ -74,7 +81,7 @@ def __init__(self, url, user, password, system, hlq, jobst1, jobst2, volume, tzo //SMPPTS DD UNIT=SYSALLDA,SPACE=(TRK,(1,1,5)) //SMPCNTL DD * """.format(self.new_name(self.definition["globalzone"])) - self.job3_global = """ SET BOUNDARY(GLOBAL). + self.job3_global = """ SET BOUNDARY(GLOBAL). UCLIN. REP GLOBALZONE ZONEINDEX( @@ -82,42 +89,151 @@ def __init__(self, url, user, password, system, hlq, jobst1, jobst2, volume, tzo ({1},{2},DLIB) ). """.format(self.tzone, self.dzone, self.new_name(self.definition["globalzone"])) - - self.job3_target = """ SET BOUNDARY({0}). + + self.job3_target = """ SET BOUNDARY({0}). UCLIN. REP TZONE({0}) RELATED({1}). """.format(self.tzone, self.dzone) - - self.job3_path = """ ZONEEDIT DDDEF. + + self.job3_path = "" + if self.version >= 9: + self.job3_path = """ REP DDDEF({0}) PATH( + '{1}' + ). +""".format(self.no_dddef, self.new_mountp + "/") + else: + self.job3_path = """ ZONEEDIT DDDEF. CHANGE PATH( '{0}'*, '{1}'*). ENDZONEEDIT. """.format(self.old_mountp, self.new_mountp) - self.job3_endzone = " ENDUCL.\n" - self.job3_distribution = """ SET BOUNDARY({0}). + self.job3_endzone = " ENDUCL.\n" + self.job3_distribution = """ SET BOUNDARY({0}). UCLIN. REP DZONE({0}) RELATED({1}). """.format(self.dzone, self.tzone) + + self.zfs_job = """//ALLOCDS EXEC PGM=IDCAMS,COND=(0,LT) +//SYSPRINT DD SYSOUT=* +//SYSIN DD * + DEFINE CLUSTER( + + NAME({0}) + + CYLINDERS({1},{2}) + + VOLUME({3}) + + ZFS + + SHAREOPTIONS(2)) +/* +//MOUNT1 EXEC PGM=BPXBATCH,COND=(0,LT) +//STDOUT DD SYSOUT=* +//STDERR DD SYSOUT=* +//STDPARM DD * +SH ; +dsn='{0}'; +mpdir={4}/$dsn; +if [ -e "$mpdir" ]; then; + rm -r $mpdir; +fi; +if [ ! 
-e "$mpdir" ]; then; + echo "Work directory $mpdir will be created."; + umask 077 ; + mkdir -p -m 700 "$mpdir"; + rc=$?; + if [ $rc -ne 0 ]; then; + echo "** mkdir command failure: rc=$rc"; + exit $rc; + fi; +fi; +echo "Format the file system $dsn."; +zfsadm format -aggregate $dsn; +rc=$?; +if [ $rc -ne 0 ]; then; + echo "** zfsadm format command failure: rc=$rc"; + exit $rc; +fi; +echo "Mount $dsn on $mpdir."; +/usr/sbin/mount -t ZFS -s nosecurity -f $dsn $mpdir; +if [ $rc -ne 0 ]; then; + echo "** mount command failure: rc=$rc"; + exit $rc; +fi; +mkdir -p {4}/workdir; +/* +//UNZIP EXEC PGM=GIMUNZIP,PARM='HASH=NO',COND=(0,LT) +//SYSUT3 DD UNIT=SYSALLDA,SPACE=(CYL,(1,1)) +//SYSUT4 DD UNIT=SYSALLDA,SPACE=(CYL,(1,1)) +//SMPWKDIR DD PATH='{4}/workdir' +//SMPOUT DD SYSOUT=* +//SYSPRINT DD SYSOUT=* +//SMPDIR DD PATHDISP=KEEP, +// PATH='{5}' +//SYSIN DD * + + + +/* + """.format(self.new_zfs + ".#", int(self.tracks / 15), int(self.secondary / 15), self.volume, self.work_mount, + self.pswi_path, self.zfs_archid, self.work_mount + "/" + self.new_zfs + ".#") + + def create_zfs(self): + new_zfs = {"cylsPri": int(self.tracks / 15), "cylsSec": int(self.secondary / 15), "volumes": [self.volume]} + new_zfs_url = "{0}/zosmf/restfiles/mfs/zfs/{1}".format(self.url, self.new_zfs + ".%23") + new_zfs_resp = requests.post(new_zfs_url, headers=self.headers, auth=(user, password), data=json.dumps(new_zfs), + verify=False) + if new_zfs_resp.status_code != 201: + print("Status code: {0}".format(new_zfs_resp.status_code)) + raise requests.exceptions.RequestException(new_zfs_resp.text) + + def create_directory(self, dir_name): + dir_name = self.work_mount + "/" + self.new_zfs + ".%23" + dir_parms = {"type": "directory", "mode": "rwxr-xrwx"} + dir_url = "{0}/zosmf/restfiles/fs{1}".format(self.url, dir_name) + dir_resp = requests.post(dir_url, headers=self.headers, auth=(user, password), data=json.dumps(dir_parms), + verify=False) + + if dir_resp.status_code != 201: + print("Status code: {0}".format(dir_resp.status_code)) + raise requests.exceptions.RequestException(dir_resp.text) + + def mount(self, dir=None, zfs=None, action="mount"): + if dir is None and zfs is None: + dir = self.work_mount + "/" + self.new_zfs + ".#" + zfs = self.new_zfs + ".%23" + action = "unmount" + elif dir is None or zfs is None: + raise TypeError("Wrong arguments") + + mount_parms = {"action": action, "mount-point": dir, "fs-type": "zFS", "mode": "rdwr"} + mount_url = "{0}/zosmf/restfiles/mfs/{1}".format(self.url, zfs) + mount_resp = requests.put(mount_url, headers=self.headers, auth=(user, password), data=json.dumps(mount_parms), + verify=False) + + if mount_resp.status_code != 204: + print("Status code: {0}".format(mount_resp.status_code)) + raise requests.exceptions.RequestException(mount_resp.text) - def archdef(self, dataset): - if dataset["dsname"].endswith(".CSI"): - new_name = self.new_name(dataset["dsname"]) - else: - new_name = self.new_name(dataset["dsname"]) + ".#" - return """= 9: + return "" + else: + new_name = self.new_name(dataset["dsname"]) + ".#" + return """ """.format(dataset["archid"], new_name, self.volume) - - def new_name(self, dsname): - return self.hlq + dsname[dsname.rfind("."):] - - def listcat(self, dataset): - final_name = self.new_name(dataset) - new_name = final_name + ".#" - lstcat = """ LISTCAT - + + def new_name(self, dsname): + return self.hlq + dsname[dsname.rfind("."):] + + def listcat(self, dataset): + final_name = self.new_name(dataset) + new_name = final_name + ".#" + lstcat = """ LISTCAT - ENTRY({0}) 
IF LASTCC = 0 THEN DO ALTER - @@ -127,149 +243,157 @@ def listcat(self, dataset): IF LASTCC = 0 THEN SET MAXCC = 0 ELSE CANCEL """.format(new_name, final_name) - if dataset.endswith(".ZFS"): - self.zfs = final_name - zfs = """ + if dataset.endswith(".ZFS"): + self.zfs = final_name + zfs = """ ALTER - {0}.* - NEWNAME({1}.*)""".format(new_name, final_name) - return lstcat.replace("|zfs|",zfs) - else: - return lstcat.replace("|zfs|", "") - - def zone_template(self, dataset, zone): - dddef_templ = "" - if dataset["zonedddefs"] is not None: - for zoneddef in dataset["zonedddefs"]: - if zoneddef["zone"] == zone: - for dddef in zoneddef["dddefs"]: - if dddef["dddef"] == self.no_dddef: - continue - dddef_templ = dddef_templ + """ REP DDDEF({0}) + return lstcat.replace("|zfs|", zfs) + else: + return lstcat.replace("|zfs|", "") + + def zone_template(self, dataset, zone): + dddef_templ = "" + if dataset["zonedddefs"] is not None: + for zoneddef in dataset["zonedddefs"]: + if zoneddef["zone"] == zone: + for dddef in zoneddef["dddefs"]: + if dddef["dddef"] == self.no_dddef: + continue + dddef_templ = dddef_templ + """ REP DDDEF({0}) DATASET({1}) VOLUME() UNIT(). -""".format(dddef["dddef"],self.new_name(dataset["dsname"])) - return dddef_templ - - def first_job(self): - jcl = self.jobst1 + self.jobst2 + self.job1 - for dataset in self.datasets: - jcl = jcl + self.archdef(dataset) - return jcl + self.job1_end - - def second_job(self): - jcl = self.jobst1 + self.jobst2 + self.job2 - for dataset in self.datasets: - if dataset["dsname"].endswith(".CSI"): - continue - jcl = jcl + self.listcat(dataset["dsname"]) - return jcl + "//*" - - def third_job(self): - jcl = self.jobst1 + self.jobst2 + self.job3 + self.job3_global - for dataset in self.datasets: - jcl = jcl + self.zone_template(dataset, "GLOBAL") - jcl = jcl + self.job3_endzone + self.job3_target - for dataset in self.datasets: - jcl = jcl + self.zone_template(dataset, self.target) - jcl = jcl + self.job3_endzone + self.job3_path + self.job3_distribution - for dataset in self.datasets: - jcl = jcl + self.zone_template(dataset, self.dlib) - return jcl + self.job3_endzone + "/*" - - def create_swi(self): - mount_parms = {"action": "mount", "mount-point": self.new_mountp, "fs-type": "zFS", "mode": "rdwr"} - mount_url = "{0}/zosmf/restfiles/mfs/{1}".format(self.url, self.zfs) - mount_resp = requests.put(mount_url, headers=self.headers, auth=(user, password), data=json.dumps(mount_parms), - verify=False) - if mount_resp.status_code != 204: - print("Status code: {0}".format(mount_resp.status_code)) - raise requests.exceptions.RequestException(mount_resp.text) - - parms = { - "name": self.swi_name, - "system": self.system, - "description": "Zowe Deploy test", - "globalzone": self.new_name(self.definition["globalzone"]), - "targetzones": [self.target], - "workflows": [ - {"name": "ZOWE Mount Workflow", - "description": "This workflow performs mount action of ZOWE zFS.", - "location": {"dsname": self.hlq + ".WORKFLOW(ZWEWRF02)"}}, - {"name": "ZOWE Configuration of Zowe 3.0", - "description": "This workflow configures Zowe v3.0.", - "location": {"dsname": self.hlq + ".WORKFLOW(ZWECONF)"}}, - {"name":"ZOWE Creation of CSR request workflow", - "description":"This workflow creates a certificate sign request.", - "location": {"dsname": self.hlq + ".WORKFLOW(ZWECRECR)"}}, - {"name":"ZOWE Sign a CSR request", - "description":"This workflow signs the certificate sign request by a local CA.", - "location": {"dsname": self.hlq + ".WORKFLOW(ZWESIGNC)"}}, - {"name":"ZOWE 
Load Authentication Certificate into ESM", - "description":"This workflow loads a signed client authentication certificate to the ESM.", - "location": {"dsname": self.hlq + ".WORKFLOW(ZWELOADC)"}}, - {"name":"ZOWE Define key ring and certificates", - "description":"This workflow defines key ring and certificates for Zowe.", - "location": {"dsname": self.hlq + ".WORKFLOW(ZWEKRING)"}} - ] - } - swi_url = "{0}/zosmf/swmgmt/swi".format(self.url) - swi_resp = requests.post(swi_url, headers=self.headers, auth=(user, password), data=json.dumps(parms), verify=False) - if swi_resp.status_code != 200: - raise requests.exceptions.RequestException(swi_resp.text) - - prod_url = "{0}/zosmf/swmgmt/swi/{1}/{2}/products".format(self.url, self.system, self.swi_name) - prod_resp = requests.put(prod_url, headers=self.headers, auth=(user, password), verify=False) - if prod_resp.status_code != 202: - raise requests.exceptions.RequestException(prod_resp.text) - status = "" - while status != "complete": - starus_url = prod_resp.json()["statusurl"] - status_resp= requests.get(starus_url, headers=self.headers, auth=(user, password), verify=False) - if status_resp.status_code != 200: - raise requests.exceptions.RequestException(status_resp.text) - status = status_resp.json()["status"] - +""".format(dddef["dddef"], self.new_name(dataset["dsname"])) + return dddef_templ + + def first_job(self): + jcl = self.jobst1 + self.jobst2 + self.job1 + for dataset in self.datasets: + jcl = jcl + self.archdef(dataset) + return jcl + self.job1_end + + def zfsInstall_job(self): + return self.jobst1 + self.jobst2 + self.zfs_job + + def second_job(self): + jcl = self.jobst1 + self.jobst2 + self.job2 + for dataset in self.datasets: + if dataset["dsname"].endswith(".CSI"): + continue + jcl = jcl + self.listcat(dataset["dsname"]) + return jcl + "//*" + + def third_job(self): + jcl = self.jobst1 + self.jobst2 + self.job3 + self.job3_global + for dataset in self.datasets: + jcl = jcl + self.zone_template(dataset, "GLOBAL") + jcl = jcl + self.job3_endzone + self.job3_target + for dataset in self.datasets: + jcl = jcl + self.zone_template(dataset, self.target) + if self.version >= 9: + jcl = jcl + self.job3_path + self.job3_endzone + self.job3_distribution + else: + jcl = jcl + self.job3_endzone + self.job3_path + self.job3_distribution + for dataset in self.datasets: + jcl = jcl + self.zone_template(dataset, self.dlib) + return jcl + self.job3_endzone + "/*" + + def create_swi(self): + self.mount(self.new_mountp, self.zfs, "mount") + + parms = { + "name": self.swi_name, + "system": self.system, + "description": "Zowe Deploy test", + "globalzone": self.new_name(self.definition["globalzone"]), + "targetzones": [self.target], + "workflows": [ + {"name": "ZOWE Mount Workflow", + "description": "This workflow performs mount action of ZOWE zFS.", + "location": {"dsname": self.hlq + ".WORKFLOW(ZWEWRF02)"}}, + {"name": "ZOWE Configuration of Zowe 3.0", + "description": "This workflow configures Zowe v3.0.", + "location": {"dsname": self.hlq + ".WORKFLOW(ZWECONF)"}}, + {"name": "ZOWE Creation of CSR request workflow", + "description": "This workflow creates a certificate sign request.", + "location": {"dsname": self.hlq + ".WORKFLOW(ZWECRECR)"}}, + {"name": "ZOWE Sign a CSR request", + "description": "This workflow signs the certificate sign request by a local CA.", + "location": {"dsname": self.hlq + ".WORKFLOW(ZWESIGNC)"}}, + {"name": "ZOWE Load Authentication Certificate into ESM", + "description": "This workflow loads a signed client 
authentication certificate to the ESM.", + "location": {"dsname": self.hlq + ".WORKFLOW(ZWELOADC)"}}, + {"name": "ZOWE Define key ring and certificates", + "description": "This workflow defines key ring and certificates for Zowe.", + "location": {"dsname": self.hlq + ".WORKFLOW(ZWEKRING)"}} + ] + } + swi_url = "{0}/zosmf/swmgmt/swi".format(self.url) + swi_resp = requests.post(swi_url, headers=self.headers, auth=(user, password), data=json.dumps(parms), verify=False) + if swi_resp.status_code != 200: + raise requests.exceptions.RequestException(swi_resp.text) + + prod_url = "{0}/zosmf/swmgmt/swi/{1}/{2}/products".format(self.url, self.system, self.swi_name) + prod_resp = requests.put(prod_url, headers=self.headers, auth=(user, password), verify=False) + if prod_resp.status_code != 202: + raise requests.exceptions.RequestException(prod_resp.text) + status = "" + while status != "complete": + starus_url = prod_resp.json()["statusurl"] + status_resp = requests.get(starus_url, headers=self.headers, auth=(user, password), verify=False) + if status_resp.status_code != 200: + raise requests.exceptions.RequestException(status_resp.text) + status = status_resp.json()["status"] + + if __name__ == "__main__": - url = os.environ['ZOSMF_URL'] + ":" + os.environ['ZOSMF_PORT'] # Url and port of the z/OSMF server - # # auth - user = os.environ['ZOSMF_USER'] # z/OSMF user - password = os.environ['ZOSMF_PASS'] # Password for z/OSMF - system = os.environ['ZOSMF_SYSTEM'] # z/OSMF nickname for the system where the PSWI will be deployed - hlq = os.environ['TEST_HLQ'] # HLQ for new datasets - mount = os.environ['TEST_MOUNT'] # New mount point for ZFS #newmount - jobst1 = os.environ['JOBST1'] # Job statement - jobst2 = os.environ['JOBST2'] # Sysaff - volume = os.environ['VOLUME'] # Volum where to store datasets - work_path = os.environ['WORK_MOUNT'] # SMP work directory - tzone = os.environ['TZONE'] # Target zone - dzone = os.environ['DZONE'] # Dlib - pswi_path = os.environ['EXPORT'] # Path to unzipped PSWI - swi_name = os.environ['DEPLOY_NAME'] # Name of the software instance to be created - - deploy = Deploy_test(url, user, password, system, hlq, jobst1, jobst2, volume, tzone, dzone, mount, pswi_path, work_path, swi_name) + url = os.environ['ZOSMF_URL'] + ":" + os.environ['ZOSMF_PORT'] # Url and port of the z/OSMF server + # # auth + user = os.environ['ZOSMF_USER'] # z/OSMF user + password = os.environ['ZOSMF_PASS'] # Password for z/OSMF + system = os.environ['ZOSMF_SYSTEM'] # z/OSMF nickname for the system where the PSWI will be deployed + hlq = os.environ['TEST_HLQ'] # HLQ for new datasets + mount = os.environ['TEST_MOUNT'] # New mount point for ZFS #newmount + jobst1 = os.environ['JOBST1'] # Job statement + jobst2 = os.environ['JOBST2'] # Sysaff + volume = os.environ['VOLUME'] # Volum where to store datasets + work_path = os.environ['WORK_MOUNT'] # SMP work directory + tzone = os.environ['TZONE'] # Target zone + dzone = os.environ['DZONE'] # Dlib + pswi_path = os.environ['EXPORT'] # Path to unzipped PSWI + swi_name = os.environ['DEPLOY_NAME'] # Name of the software instance to be created - first = deploy.first_job() - second = deploy.second_job() - third = deploy.third_job() - - try: - submit_jcl = glob.glob('./*/submit_jcl.sh')[0] - except IndexError: - raise FileNotFoundError("\"submit_jcl.sh\" for submitting JCLs wasn't found. 
Make sure that it is in a subfolder of {0}".format(os.getcwd())) - - ec1 = subprocess.call(["sh", submit_jcl , first]) - if ec1 != 0: - raise OSError("The first job failed.") - ec2 = subprocess.call(["sh", submit_jcl, second]) - if ec2 != 0: - raise OSError("The second job failed.") - ec3 = subprocess.call(["sh", submit_jcl, third]) - if ec3 != 0: - raise OSError("The third job failed.") - - deploy.create_swi() - print("Portable software instance deployed successfully!") + deploy = Deploy_test(url, user, password, system, hlq, jobst1, jobst2, volume, tzone, dzone, mount, pswi_path, + work_path, swi_name) + + unzip_job = deploy.first_job() + install_zfs = deploy.zfsInstall_job() + rename_datasets = deploy.second_job() + update_csi = deploy.third_job() + + try: + submit_jcl = glob.glob('./*/submit_jcl.sh')[0] + except IndexError: + raise FileNotFoundError( + "\"submit_jcl.sh\" for submitting JCLs wasn't found. Make sure that it is in a subfolder of {0}".format( + os.getcwd())) + + unzip_rc = subprocess.call(["sh", submit_jcl, unzip_job]) + if unzip_rc != 0: + raise OSError("The unzip datasets job failed.") + install_rc = subprocess.call(["sh", submit_jcl, install_zfs]) + if install_rc != 0: + raise OSError("The install zFS datasets job failed.") + deploy.mount() # unmount + rename_rc = subprocess.call(["sh", submit_jcl, rename_datasets]) + if rename_rc != 0: + raise OSError("The rename datasets job failed.") + update_rc = subprocess.call(["sh", submit_jcl, update_csi]) + if update_rc != 0: + raise OSError("The update CSI job failed.") + deploy.create_swi() + print("Portable software instance deployed successfully!") -#todo: function for removing just the HLQ which all the old datasets have same -> needed only for internal usage +# todo: function for removing just the HLQ that all the old datasets share -> needed only for internal usage diff --git a/pswi/scripts/spool_files.sh b/pswi/scripts/spool_files.sh index 289fe3ce8c..29b8ef107c 100644 --- a/pswi/scripts/spool_files.sh +++ b/pswi/scripts/spool_files.sh @@ -20,4 +20,4 @@ done >$LOG_DIR/report.txt - cat $JOBNAME/$JOBID >>$LOG_DIR/report.txt + cat $LOG_DIR/jobs/output/${JOBNAME}_${JOBID} >>$LOG_DIR/report.txt exit -1 fi
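
Note: the new create_zfs(), create_directory(), and mount() helpers in deploy_test_2_3.py all drive the same z/OSMF REST files interface that the existing create_swi() code already used. A minimal standalone sketch of the mount/unmount call pattern follows, for reference only: the wrapper name zosmf_mount is hypothetical, while the endpoint, payload, and expected 204 status are taken from the PR itself, and the ZOSMF_* environment variables are the ones the script already reads.

import json
import os

import requests

ZOSMF = os.environ['ZOSMF_URL'] + ":" + os.environ['ZOSMF_PORT']  # z/OSMF host:port
AUTH = (os.environ['ZOSMF_USER'], os.environ['ZOSMF_PASS'])       # basic auth
HEADERS = {'X-CSRF-ZOSMF-HEADER': ''}                             # CSRF header required by z/OSMF

def zosmf_mount(zfs, mount_point, action="mount"):
    # PUT /zosmf/restfiles/mfs/<dsname> mounts or unmounts a zFS aggregate;
    # z/OSMF answers 204 (no content) on success.
    parms = {"action": action, "mount-point": mount_point, "fs-type": "zFS", "mode": "rdwr"}
    url = "{0}/zosmf/restfiles/mfs/{1}".format(ZOSMF, zfs)
    resp = requests.put(url, headers=HEADERS, auth=AUTH, data=json.dumps(parms), verify=False)
    if resp.status_code != 204:
        raise requests.exceptions.RequestException(resp.text)

As in the PR, a data set name containing "#" has to be URL-encoded as "%23" in the request path (compare self.new_zfs + ".%23" in create_zfs() with the plain ".#" used in the generated JCL).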
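
Note: the sizing and naming conventions behind the new ALLOCDS/MOUNT1/UNZIP job and the IDCAMS rename step are implicit in the JCL templates. The sketch below restates that arithmetic and name-building under stated assumptions; the helper names are illustrative and not part of the PR.

TRACKS_PER_CYL = 15  # 3390 DASD geometry: 15 tracks per cylinder

def cylinders(tracks):
    # IZUD00DF.json reports primary/secondary sizes in tracks; DEFINE CLUSTER in
    # the generated ALLOCDS step takes CYLINDERS, hence the int(tracks / 15) in the PR.
    return int(tracks) // TRACKS_PER_CYL

def staging_name(hlq, dsname):
    # Mirrors Deploy_test.new_name() plus the ".#" staging suffix: data sets are
    # first unzipped under <TEST_HLQ>.<last qualifier>.#, and the LISTCAT/ALTER
    # job later drops the "#" once the staged copy is confirmed to exist.
    return hlq.upper() + dsname[dsname.rfind("."):] + ".#"

For example, a 4500-track zFS maps to CYLINDERS(300), and a data set ending in .ZFS is staged as <TEST_HLQ>.ZFS.# before being renamed.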