diff --git a/dpgen/auto_test/lib/mfp_eosfit.py b/dpgen/auto_test/lib/mfp_eosfit.py
index c28b5651f..227012844 100755
--- a/dpgen/auto_test/lib/mfp_eosfit.py
+++ b/dpgen/auto_test/lib/mfp_eosfit.py
@@ -1441,16 +1441,7 @@ def ext_velp(
     )
     for i in range(ndata):
         fw.write(
-            "{:12.6f}\t{:12.6f}\t{:12.6f}\t{:12.6f}\t{:12.6f}\t{:12.6f}\t{:12.6f}\t{:12.6f}\n".format(
-                vv[i],
-                ee[i],
-                cellaa[i],
-                cellbb[i],
-                cellcc[i],
-                cellbaba[i],
-                cellcaca[i],
-                cellca_cal[i],
-            )
+            f"{vv[i]:12.6f}\t{ee[i]:12.6f}\t{cellaa[i]:12.6f}\t{cellbb[i]:12.6f}\t{cellcc[i]:12.6f}\t{cellbaba[i]:12.6f}\t{cellcaca[i]:12.6f}\t{cellca_cal[i]:12.6f}\n"
         )
     fw.flush()
     fw.close()
@@ -1662,9 +1653,7 @@ def lsqfit_eos(
     )
     for i in range(len(vol)):
         fve.write(
-            "{:20f}\t{:20f}\t{:20f}\t{:20f}\n".format(
-                vol[i], repro_en[i], en[i], 100 * np.abs((en[i] - repro_en[i]) / en[i])
-            )
+            f"{vol[i]:20f}\t{repro_en[i]:20f}\t{en[i]:20f}\t{100 * np.abs((en[i] - repro_en[i]) / en[i]):20f}\n"
         )
         fve.flush()
         p_tmp = repro_press[i]
diff --git a/dpgen/auto_test/lib/pwscf.py b/dpgen/auto_test/lib/pwscf.py
index eb3efddcd..e53384003 100644
--- a/dpgen/auto_test/lib/pwscf.py
+++ b/dpgen/auto_test/lib/pwscf.py
@@ -72,12 +72,7 @@ def _make_pwscf_03_config(sys_data):
     cc = 0
     for ii in range(ntypes):
         for jj in range(atom_numbs[ii]):
-            ret += "{} {:f} {:f} {:f}\n".format(
-                atom_names[ii],
-                coordinates[cc][0],
-                coordinates[cc][1],
-                coordinates[cc][2],
-            )
+            ret += f"{atom_names[ii]} {coordinates[cc][0]:f} {coordinates[cc][1]:f} {coordinates[cc][2]:f}\n"
             cc += 1
     return ret

diff --git a/dpgen/auto_test/reproduce.py b/dpgen/auto_test/reproduce.py
index ada3102fb..47c266d61 100644
--- a/dpgen/auto_test/reproduce.py
+++ b/dpgen/auto_test/reproduce.py
@@ -153,12 +153,7 @@ def post_repro(
                 output_ener_tot.extend(output_task_result["energies"])

            init_epa = init_ener[jj - idid] / natoms
-            ptr_data += "{} {:7.3f} {:7.3f} {:7.3f}\n".format(
-                ii,
-                init_epa,
-                output_epa,
-                output_epa - init_epa,
-            )
+            ptr_data += f"{ii} {init_epa:7.3f} {output_epa:7.3f} {output_epa - init_epa:7.3f}\n"
         idid += nframe
     output_ener = np.array(output_ener)
     output_ener = np.reshape(output_ener, [-1, 1])
diff --git a/dpgen/data/tools/ovito_file_convert.py b/dpgen/data/tools/ovito_file_convert.py
index 252b70b22..b22f7e44d 100755
--- a/dpgen/data/tools/ovito_file_convert.py
+++ b/dpgen/data/tools/ovito_file_convert.py
@@ -2,6 +2,7 @@
 """This Script is adapted from Alexander Stukowski, the author of OVITO.
 See: http://forum.ovito.org/index.php?topic=131.0 for details.
 """
+
 import argparse

 from ovito.io import export_file, import_file
diff --git a/dpgen/generator/lib/abacus_scf.py b/dpgen/generator/lib/abacus_scf.py
index 561d84500..744147e88 100644
--- a/dpgen/generator/lib/abacus_scf.py
+++ b/dpgen/generator/lib/abacus_scf.py
@@ -401,9 +401,9 @@ def get_abacus_STRU(STRU, INPUT=None, n_ele=None):
     data["atom_types"] = types
     data["cells"] = cell
     data["coords"] = coords
-    data[
-        "atom_masses"
-    ] = masses  # Notice that this key is not defined in dpdata system.
+    data["atom_masses"] = (
+        masses  # Notice that this key is not defined in dpdata system.
+    )
     data["pp_files"] = pp_files
     data["orb_files"] = orb_files
     data["dpks_descriptor"] = dpks_descriptor
diff --git a/dpgen/generator/lib/lammps.py b/dpgen/generator/lib/lammps.py
index d9ff4a493..d96415a3f 100644
--- a/dpgen/generator/lib/lammps.py
+++ b/dpgen/generator/lib/lammps.py
@@ -169,11 +169,7 @@ def make_lammps_input(
         pka_vec = _sample_sphere()
         pka_vec *= pka_vn
         ret += "group first id 1\n"
-        ret += 'if "${{restart}} == 0" then "velocity first set {:f} {:f} {:f}"\n'.format(
-            pka_vec[0],
-            pka_vec[1],
-            pka_vec[2],
-        )
+        ret += f'if "${{restart}} == 0" then "velocity first set {pka_vec[0]:f} {pka_vec[1]:f} {pka_vec[2]:f}"\n'
         ret += "fix 2 all momentum 1 linear 1 1 1\n"
         ret += "\n"
     if ensemble.split("-")[0] == "npt":
diff --git a/dpgen/generator/lib/pwscf.py b/dpgen/generator/lib/pwscf.py
index 3e1ac5e00..ebd8a2dc0 100644
--- a/dpgen/generator/lib/pwscf.py
+++ b/dpgen/generator/lib/pwscf.py
@@ -110,12 +110,7 @@ def _make_pwscf_03_config(sys_data):
     cc = 0
     for ii in range(ntypes):
         for jj in range(atom_numbs[ii]):
-            ret += "{} {:f} {:f} {:f}\n".format(
-                atom_names[ii],
-                coordinates[cc][0],
-                coordinates[cc][1],
-                coordinates[cc][2],
-            )
+            ret += f"{atom_names[ii]} {coordinates[cc][0]:f} {coordinates[cc][1]:f} {coordinates[cc][2]:f}\n"
             cc += 1
     return ret

diff --git a/dpgen/generator/run.py b/dpgen/generator/run.py
index d0157145f..6d9f1c4e4 100644
--- a/dpgen/generator/run.py
+++ b/dpgen/generator/run.py
@@ -591,21 +591,21 @@ def make_train(iter_index, jdata, mdata):
                 if (
                     len(np.array(model_devi_activation_func).shape) == 2
                 ):  # 2-dim list for emd/fitting net-resolved assignment of actF
-                    jinput["model"]["descriptor"][
-                        "activation_function"
-                    ] = model_devi_activation_func[ii][0]
-                    jinput["model"]["fitting_net"][
-                        "activation_function"
-                    ] = model_devi_activation_func[ii][1]
+                    jinput["model"]["descriptor"]["activation_function"] = (
+                        model_devi_activation_func[ii][0]
+                    )
+                    jinput["model"]["fitting_net"]["activation_function"] = (
+                        model_devi_activation_func[ii][1]
+                    )
                 if (
                     len(np.array(model_devi_activation_func).shape) == 1
                 ):  # for backward compatibility, 1-dim list, not net-resolved
-                    jinput["model"]["descriptor"][
-                        "activation_function"
-                    ] = model_devi_activation_func[ii]
-                    jinput["model"]["fitting_net"][
-                        "activation_function"
-                    ] = model_devi_activation_func[ii]
+                    jinput["model"]["descriptor"]["activation_function"] = (
+                        model_devi_activation_func[ii]
+                    )
+                    jinput["model"]["fitting_net"]["activation_function"] = (
+                        model_devi_activation_func[ii]
+                    )
     # dump the input.json
     with open(os.path.join(task_path, train_input_file), "w") as outfile:
         json.dump(jinput, outfile, indent=4)
@@ -1042,9 +1042,9 @@ def revise_lmp_input_dump(lmp_lines, trj_freq, model_devi_merge_traj=False):

 def revise_lmp_input_plm(lmp_lines, in_plm, out_plm="output.plumed"):
     idx = find_only_one_key(lmp_lines, ["fix", "dpgen_plm"])
-    lmp_lines[
-        idx
-    ] = f"fix dpgen_plm all plumed plumedfile {in_plm} outfile {out_plm}\n"
+    lmp_lines[idx] = (
+        f"fix dpgen_plm all plumed plumedfile {in_plm} outfile {out_plm}\n"
+    )
     return lmp_lines


@@ -1815,9 +1815,10 @@ def _make_model_devi_amber(
     nsteps = jdata["nsteps"]

     for ii, pp in enumerate(mdin):
-        with open(pp) as f, open(
-            os.path.join(work_path, "init%d.mdin" % ii), "w"
-        ) as fw:
+        with (
+            open(pp) as f,
+            open(os.path.join(work_path, "init%d.mdin" % ii), "w") as fw,
+        ):
             mdin_str = f.read()
             # freq, nstlim, qm_region, qm_theory, qm_charge, rcut, graph
             mdin_str = (
@@ -1883,9 +1884,10 @@
                 if not isinstance(r, Iterable) or isinstance(r, str):
                     r = [r]
                 # disang file should include RVAL, RVAL2, ...
-                with open(disang[sys_idx[sys_counter]]) as f, open(
-                    "TEMPLATE.disang", "w"
-                ) as fw:
+                with (
+                    open(disang[sys_idx[sys_counter]]) as f,
+                    open("TEMPLATE.disang", "w") as fw,
+                ):
                     tl = f.read()
                     for ii, rr in enumerate(r):
                         if isinstance(rr, Iterable) and not isinstance(rr, str):
@@ -1999,14 +2001,7 @@ def run_md_model_devi(iter_index, jdata, mdata):
         if ndx_filename:
             command += f'&& echo -e "{grp_name}\\n{grp_name}\\n" | {model_devi_exec} trjconv -s {ref_filename} -f {deffnm}.trr -n {ndx_filename} -o {traj_filename} -pbc mol -ur compact -center'
         else:
-            command += '&& echo -e "{}\\n{}\\n" | {} trjconv -s {} -f {}.trr -o {} -pbc mol -ur compact -center'.format(
-                grp_name,
-                grp_name,
-                model_devi_exec,
-                ref_filename,
-                deffnm,
-                traj_filename,
-            )
+            command += f'&& echo -e "{grp_name}\\n{grp_name}\\n" | {model_devi_exec} trjconv -s {ref_filename} -f {deffnm}.trr -o {traj_filename} -pbc mol -ur compact -center'
         command += "&& if [ ! -d traj ]; then \n mkdir traj; fi\n"
         command += f"python -c \"import dpdata;system = dpdata.System('{traj_filename}', fmt='gromacs/gro'); [system.to_gromacs_gro('traj/%d.gromacstrj' % (i * {trj_freq}), frame_idx=i) for i in range(system.get_nframes())]; system.to_deepmd_npy('traj_deepmd')\""
         command += f"&& dp model-devi -m ../graph.000.pb ../graph.001.pb ../graph.002.pb ../graph.003.pb -s traj_deepmd -o model_devi.out -f {trj_freq}"
@@ -2508,9 +2503,7 @@
         tot = len(summaryfmax) - nan_num
         candi_num = tot - acc_num - fail_num
         dlog.info(
-            "summary accurate_ratio: {:8.4f}% candidata_ratio: {:8.4f}% failed_ratio: {:8.4f}% in {:d} structures".format(
-                acc_num * 100 / tot, candi_num * 100 / tot, fail_num * 100 / tot, tot
-            )
+            f"summary accurate_ratio: {acc_num * 100 / tot:8.4f}% candidata_ratio: {candi_num * 100 / tot:8.4f}% failed_ratio: {fail_num * 100 / tot:8.4f}% in {tot:d} structures"
         )
     # --------------------------------------------------------------------------------------------------------------------------------------

@@ -2662,9 +2655,7 @@ def _trust_limitation_check(sys_idx, lim):
                 continue
             for cc_key, cc_value in counter.items():
                 dlog.info(
-                    "system {:s} {:9s} : {:6d} in {:6d} {:6.2f} %".format(
-                        ss, cc_key, cc_value, fp_sum, cc_value / fp_sum * 100
-                    )
+                    f"system {ss:s} {cc_key:9s} : {cc_value:6d} in {fp_sum:6d} {cc_value / fp_sum * 100:6.2f} %"
                 )
         random.shuffle(fp_candidate)
         if detailed_report_make_fp:
@@ -2738,15 +2729,7 @@ def _trust_limitation_check(sys_idx, lim):
            numb_task = 0
        # ----------------------------------------------------------------------------
        dlog.info(
-            "system {:s} accurate_ratio: {:8.4f} thresholds: {:6.4f} and {:6.4f} eff. task min and max {:4d} {:4d} number of fp tasks: {:6d}".format(
-                ss,
-                accurate_ratio,
-                fp_accurate_soft_threshold,
-                fp_accurate_threshold,
-                fp_task_min,
-                this_fp_task_max,
-                numb_task,
-            )
+            f"system {ss:s} accurate_ratio: {accurate_ratio:8.4f} thresholds: {fp_accurate_soft_threshold:6.4f} and {fp_accurate_threshold:6.4f} eff. task min and max {fp_task_min:4d} {this_fp_task_max:4d} number of fp tasks: {numb_task:6d}"
         )

         # make fp tasks
@@ -2878,21 +2861,15 @@ def _trust_limitation_check(sys_idx, lim):
             os.chdir(cwd)
         if count_bad_box > 0:
             dlog.info(
-                "system {:s} skipped {:6d} confs with bad box, {:6d} remains".format(
-                    ss, count_bad_box, numb_task - count_bad_box
-                )
+                f"system {ss:s} skipped {count_bad_box:6d} confs with bad box, {numb_task - count_bad_box:6d} remains"
             )
         if count_bad_cluster > 0:
             dlog.info(
-                "system {:s} skipped {:6d} confs with bad cluster, {:6d} remains".format(
-                    ss, count_bad_cluster, numb_task - count_bad_cluster
-                )
+                f"system {ss:s} skipped {count_bad_cluster:6d} confs with bad cluster, {numb_task - count_bad_cluster:6d} remains"
             )
     if model_devi_engine == "calypso":
         dlog.info(
-            "summary accurate_ratio: {:8.4f}% candidata_ratio: {:8.4f}% failed_ratio: {:8.4f}% in {:d} structures".format(
-                acc_num * 100 / tot, candi_num * 100 / tot, fail_num * 100 / tot, tot
-            )
+            f"summary accurate_ratio: {acc_num * 100 / tot:8.4f}% candidata_ratio: {candi_num * 100 / tot:8.4f}% failed_ratio: {fail_num * 100 / tot:8.4f}% in {tot:d} structures"
         )
     if cluster_cutoff is None:
         cwd = os.getcwd()
diff --git a/dpgen/gui.py b/dpgen/gui.py
index f116ee246..4f92d43ec 100644
--- a/dpgen/gui.py
+++ b/dpgen/gui.py
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
 """DP-GUI entrypoint."""
+
 import argparse


diff --git a/dpgen/simplify/simplify.py b/dpgen/simplify/simplify.py
index a69ffe12e..eec08e8bf 100644
--- a/dpgen/simplify/simplify.py
+++ b/dpgen/simplify/simplify.py
@@ -8,6 +8,7 @@
 01: calculate model deviations of the rest dataset, pick up data with proper model deviaiton
 02: fp (optional, if the original dataset do not have fp data, same as generator)
 """
+
 import glob
 import logging
 import os
@@ -336,9 +337,10 @@ def post_model_devi(iter_index, jdata, mdata):
                     "reach a place that should NOT be reached..."
                 )
             else:
-                with open(os.path.join(work_path, detail_file_name)) as f, open(
-                    os.path.join(work_path, true_error_file_name)
-                ) as f_err:
+                with (
+                    open(os.path.join(work_path, detail_file_name)) as f,
+                    open(os.path.join(work_path, true_error_file_name)) as f_err,
+                ):
                     for line, line_err in zip(f, f_err):
                         if line.startswith("# data.rest.old"):
                             name = (line.split()[1]).split("/")[-1]
@@ -389,9 +391,7 @@
     fp_sum = sum(counter.values())
     for cc_key, cc_value in counter.items():
         dlog.info(
-            "{:9s} : {:6d} in {:6d} {:6.2f} %".format(
-                cc_key, cc_value, fp_sum, cc_value / fp_sum * 100
-            )
+            f"{cc_key:9s} : {cc_value:6d} in {fp_sum:6d} {cc_value / fp_sum * 100:6.2f} %"
         )

     if counter["candidate"] == 0 and counter["failed"] > 0:
diff --git a/tests/test_check_examples.py b/tests/test_check_examples.py
index cbbae79b6..3033740f2 100644
--- a/tests/test_check_examples.py
+++ b/tests/test_check_examples.py
@@ -1,6 +1,7 @@
 """This module ensures input in the examples directory could pass the argument
 checking.
 """
+
 import json
 import unittest
 from pathlib import Path
diff --git a/tests/test_collect.py b/tests/test_collect.py
index 434f2c0ea..99979697d 100644
--- a/tests/test_collect.py
+++ b/tests/test_collect.py
@@ -15,7 +15,11 @@ def setUp(self):
         )

     def test_collect_data(self):
-        with tempfile.TemporaryDirectory() as inpdir, tempfile.TemporaryDirectory() as outdir, tempfile.NamedTemporaryFile() as param_file:
+        with (
+            tempfile.TemporaryDirectory() as inpdir,
+            tempfile.TemporaryDirectory() as outdir,
+            tempfile.NamedTemporaryFile() as param_file,
+        ):
             self.data.to_deepmd_npy(Path(inpdir) / "iter.000000" / "02.fp" / "data.000")
             self.data.to_deepmd_npy(
                 Path(inpdir) / "iter.000001" / "02.fp" / "data.000" / "aa"