From 21a10321f8596169220f90234a016c4c7c1f0806 Mon Sep 17 00:00:00 2001
From: Lachlan Perrier
Date: Tue, 6 Aug 2024 12:10:38 -0400
Subject: [PATCH 1/7] Running with network accelerate

---
 requirements.txt                                    | 4 ++--
 tm2py/components/network/highway/highway_assign.py | 3 ++-
 tm2py/config.py                                     | 1 +
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index 18b275f7..0e100c6a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -25,9 +25,9 @@ osmnx >= 0.12
 pandas > 1.0
 pydantic < 2.0
 pyproj > 2.2.0
-pywin32==224 ; sys_platform == 'win32'
+pywin32==306 ; sys_platform == 'win32'
 pyyaml
-pywin32==224 ; sys_platform == 'win32'
+pywin32==306 ; sys_platform == 'win32'
 rtree
 scipy
 shapely

diff --git a/tm2py/components/network/highway/highway_assign.py b/tm2py/components/network/highway/highway_assign.py
index fa2091c2..7035249d 100644
--- a/tm2py/components/network/highway/highway_assign.py
+++ b/tm2py/components/network/highway/highway_assign.py
@@ -332,7 +332,8 @@ def _get_assignment_spec(
                 "normalized_gap": 0.0,
             },
             "performance_settings": {
-                "number_of_processors": self.controller.num_processors
+                "number_of_processors": self.controller.num_processors,
+                "network_acceleration": self.config.network_acceleration,
             },
         }
         if not path_analysis:

diff --git a/tm2py/config.py b/tm2py/config.py
index fed43e3b..cf00ada3 100644
--- a/tm2py/config.py
+++ b/tm2py/config.py
@@ -957,6 +957,7 @@ class HighwayConfig(ConfigItem):
     generic_highway_mode_code: str = Field(min_length=1, max_length=1)
     relative_gaps: Tuple[HighwayRelativeGapConfig, ...] = Field()
     max_iterations: int = Field(ge=0)
+    network_acceleration: bool = Field()
     area_type_buffer_dist_miles: float = Field(gt=0)
     drive_access_output_skim_path: Optional[str] = Field(default=None)
     output_skim_path: pathlib.Path = Field()

From 2bc4a90edc89d2fc4a1f7c547e0ae135d4457f1a Mon Sep 17 00:00:00 2001
From: Lachlan Perrier
Date: Mon, 26 Aug 2024 14:10:24 -0400
Subject: [PATCH 2/7] update install instructions

---
 README.md | 44 +++++++++++++++++++++-----------------------
 1 file changed, 21 insertions(+), 23 deletions(-)

diff --git a/README.md b/README.md
index c73e5845..ddf7cea0 100644
--- a/README.md
+++ b/README.md
@@ -12,38 +12,28 @@ A python package to run the San Francisco Bay Area's Travel Model.
 
 ## Installation
 
-Recommended install in a virtual environment.
+It is recommended that tm2py be installed in a virtual environment.
 
 Stable (to come - use bleeding edge for now):
-
 ```bash
 pip install tm2py
 ```
 
-Bleeding edge:
-TODO: Which environment is this? Does it still work for anyone?
+For developers, it is recommended that the following instructions be used to install:
+*Note: the Python environment has recently been updated to Python 3.11; there may be some instabilities with the current build.*
+```bat
+git clone --branch develop https://github.com/BayAreaMetro/tm2py.git
 
-```bash
-conda env create -f environment.yml
+conda create -n tm2py python=3.11.9
 conda activate tm2py
-pip install git+https://github.com/bayareametro/tm2py@develop
-```
-
-The above directions didn't work for the MTC Windows environment. The following method did work, on a machine with Emme-4.6.0 installed. This required a compiled GDAL/Fiona package set for python 3.7, this can be found in the [lib directory](/lib/) , consisting of the following:
-
-1. GDAL-3.3.2-cp37-cp37m-win_amd64.whl
-2. pyproj-3.2.1-cp37-cp37m-win_amd64.whl
-3. Fiona-1.8.20-cp37-cp37m-win_amd64.whl
-4. Shapely-1.8.1-cp37-cp37m-win_amd64.whl
-5. 
geopandas-0.10.2-py2.py3-none-any.whl - -With these files in hand, the following installation instructions work: +conda install gdal +conda install pyproj +conda install fiona +conda install shapely +conda install geopandas -```bat -conda create -n tm2py python=3.7.6 -conda activate tm2py -pip install [the packages listed above, in that order] cd +git pip install -e . conda env config vars set GDAL_VERSION=3.3.2 ``` @@ -51,9 +41,17 @@ Finally, install the Emme python packages using the Emme GUI. This effectively c `C:\Users\%USERNAME%\.conda\envs\tm2py\Lib\site-packages\emme.pth` with the following contents, so you could create the file yourself. ```python -import os, site; site.addsitedir("C:/Program Files/INRO/Emme/Emme 4/Emme-4.6.0/Python37/Lib/site-packages") +import os, site; site.addsitedir(os.path.join(os.environ["EMMEPATH"], "Python311/Lib/site-packages")) +``` + +*This should start Emme OpenPath, if it does not you should be able to manually set the correct version of emme such as below* +```python +import os, site +os.environ["EMMEPATH"] = r"C:\Program Files\Bentley\OpenPaths\EMME 24.00.00" +site.addsitedir(os.path.join(os.environ["EMMEPATH"], "Python311/Lib/site-packages")) ``` + In troubleshooting, sometimes DLL load failure errors would occur which may be resolved by importing gdal before importing emme packages. Emme support explained this thusly: At load time, the EMME API will always load the geos_c co-located with the EMME API, unless it was already loaded from some other location, which is the case when you import GDAL first. EMME API seems to be compatible with the newer GDAL/geos_c (reminder: not tested!). But this does not appear to be the case the other way around (newer GDAL is not compatible with older geos_c). From 5696c0ccd9e12bc1af95ae8cd7dfcd4c02cf78f1 Mon Sep 17 00:00:00 2001 From: Lachlan Perrier Date: Fri, 30 Aug 2024 16:08:47 -0400 Subject: [PATCH 3/7] minor updates for recent runs --- scripts/compare_skims.py | 8 ++--- scripts/compile_model_runs.py | 60 ++++++++++++++++++++++++++++++++--- 2 files changed, 60 insertions(+), 8 deletions(-) diff --git a/scripts/compare_skims.py b/scripts/compare_skims.py index 82c95ca6..ad1cd8dd 100644 --- a/scripts/compare_skims.py +++ b/scripts/compare_skims.py @@ -25,10 +25,10 @@ def read_matrix_as_long_df(path: Path, run_name): ) -a = read_matrix_as_long_df( - r"D:\TEMP\TM2.2.1.1-New_network_rerun\TM2.2.1.1_new_taz\skim_matrices\highway\HWYSKMAM_taz.omx", - "test", -) +# a = read_matrix_as_long_df( +# r"D:\TEMP\TM2.2.1.1-New_network_rerun\TM2.2.1.1_new_taz\skim_matrices\highway\HWYSKMAM_taz.omx", +# "test", +# ) # %% all_skims = [] for skim_matrix_path in network_fid_path.rglob("*AM_taz.omx"): diff --git a/scripts/compile_model_runs.py b/scripts/compile_model_runs.py index 553a3656..70bea3ec 100644 --- a/scripts/compile_model_runs.py +++ b/scripts/compile_model_runs.py @@ -7,7 +7,7 @@ from shapely.geometry import LineString input_dir = Path( - r"Z:\MTC\US0024934.9168\Task_3_runtime_improvements\3.1_network_fidelity\run_result" + r"Z:\MTC\US0024934.9168\Task_3_runtime_improvements\3.2_remove_cosmetic_nodes\run_result" ) output_dir = input_dir / "consolidated_3" @@ -19,8 +19,8 @@ # print("writing") # input[["#link_id", "geometry"]].to_file(output_dir / "test_geom.geojson") -scenarios_to_consolidate = (11, 12, 13, 14, 15) -runs_to_consolidate = (3, 4) +scenarios_to_consolidate = (12,) +runs_to_consolidate = (15, 23, 24, 25) # %% @@ -42,7 +42,7 @@ def read_file_and_tag( return None run = file.parent.parent.stem - run_number = 
int(run.split("_")[-1]) + run_number = int(run.split("_")[1]) if run_number not in runs_to_consolidate: return None @@ -158,7 +158,59 @@ def combine_tables(dfs, columns_same): links_wide_table["ft"] = links_wide_table[ft_cols].max(axis=1) links_wide_table = links_wide_table.drop(columns=ft_cols) +#%% +plotting_table = links_wide_table.head(10_000_000).dropna() +print(plotting_table.shape) +import matplotlib.pyplot as plt +from scipy import stats +for i in range(1, 7): + slicer = plotting_table["ft"] == i + x = plotting_table.loc[slicer, "@volau_run24_scenAM"] + y = plotting_table.loc[slicer, "@volau_run25_scenAM"] + plt.scatter(x, y) + plt.xlabel("run 3") + plt.ylabel("run 19") + print(stats.linregress(x, y)) + print(i) + # plt.show() +#%% +vol_pairs_to_compare = [ + ("@volau_run23_scenAM", "@volau_run15_scenAM"), + ("@volau_run23_scenAM", "@volau_run24_scenAM"), + ("@volau_run23_scenAM", "@volau_run25_scenAM"), +] + +bases = [] +comparisons = [] +func_types = [] +slopes = [] +r_vals = [] +for base, compare in vol_pairs_to_compare: + print(base, compare) + stats_table = links_wide_table.dropna() + for i in range(1, 7): + slicer = stats_table["ft"] == i + x = stats_table.loc[slicer, "@volau_run24_scenAM"] + y = stats_table.loc[slicer, "@volau_run25_scenAM"] + lingress = stats.linregress(x, y) + bases.append(base) + comparisons.append(compare) + func_types.append(i) + slopes.append(lingress.slope) + r_vals.append(lingress.rvalue) + +print(pd.DataFrame.from_dict( + dict( + bases = bases, + comparisons = comparisons, + func_types = func_types, + slopes = slopes, + r_vals = r_vals, + ) +).to_markdown()) +#%% +import pyperclip # %% links_wide_table.to_file( Path( From fafb004f0ba8451b1a3a2257346af6fcb9bf76e9 Mon Sep 17 00:00:00 2001 From: Lachlan Perrier Date: Fri, 30 Aug 2024 16:55:29 -0400 Subject: [PATCH 4/7] updated compile model runs --- scripts/compile_model_runs.py | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/scripts/compile_model_runs.py b/scripts/compile_model_runs.py index 70bea3ec..35152476 100644 --- a/scripts/compile_model_runs.py +++ b/scripts/compile_model_runs.py @@ -21,6 +21,7 @@ scenarios_to_consolidate = (12,) runs_to_consolidate = (15, 23, 24, 25) +# runs_to_consolidate = (15, 22) # %% @@ -166,20 +167,30 @@ def combine_tables(dfs, columns_same): from scipy import stats for i in range(1, 7): slicer = plotting_table["ft"] == i - x = plotting_table.loc[slicer, "@volau_run24_scenAM"] - y = plotting_table.loc[slicer, "@volau_run25_scenAM"] - plt.scatter(x, y) - plt.xlabel("run 3") - plt.ylabel("run 19") + x = plotting_table.loc[slicer, "@volau_run15_scenAM"] + y = plotting_table.loc[slicer, "@volau_run23_scenAM"] + plt.scatter(x, y, label=f'ft = {i}') + plt.xlabel("run 15") + plt.ylabel("run 22") print(stats.linregress(x, y)) print(i) # plt.show() +plt.legend() #%% vol_pairs_to_compare = [ ("@volau_run23_scenAM", "@volau_run15_scenAM"), ("@volau_run23_scenAM", "@volau_run24_scenAM"), ("@volau_run23_scenAM", "@volau_run25_scenAM"), ] +rename_dict = { + "@volau_run15_scenAM": "emme 4.6.1", + "@volau_run23_scenAM": "emme Open Paths", + "@volau_run24_scenAM": "emme Open Paths Network Accelerate", + "@volau_run25_scenAM": "remove Cosmetic Nodes", +} +# vol_pairs_to_compare = [ +# ("@volau_run15_scenAM", "@volau_run22_scenAM"), +# ] bases = [] comparisons = [] @@ -191,8 +202,8 @@ def combine_tables(dfs, columns_same): stats_table = links_wide_table.dropna() for i in range(1, 7): slicer = stats_table["ft"] == i - x = 
stats_table.loc[slicer, "@volau_run24_scenAM"] - y = stats_table.loc[slicer, "@volau_run25_scenAM"] + x = stats_table.loc[slicer, base] + y = stats_table.loc[slicer, compare] lingress = stats.linregress(x, y) bases.append(base) comparisons.append(compare) @@ -200,7 +211,7 @@ def combine_tables(dfs, columns_same): slopes.append(lingress.slope) r_vals.append(lingress.rvalue) -print(pd.DataFrame.from_dict( +df = pd.DataFrame.from_dict( dict( bases = bases, comparisons = comparisons, @@ -208,7 +219,12 @@ def combine_tables(dfs, columns_same): slopes = slopes, r_vals = r_vals, ) -).to_markdown()) +) +df["bases"] = df["bases"].map(rename_dict) +df["comparisons"] = df["comparisons"].map(rename_dict) +df["slopes"] = df["slopes"].round(2) +df["r_vals"] = df["r_vals"].round(2) +print(df.to_markdown()) #%% import pyperclip # %% From ae0a0c3b098a19fdea3eb4f41c5b465ff9a998d3 Mon Sep 17 00:00:00 2001 From: Lachlan Perrier Date: Tue, 3 Sep 2024 16:04:02 -0400 Subject: [PATCH 5/7] minor update in post processing scripts --- scripts/compile_model_runs.py | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/scripts/compile_model_runs.py b/scripts/compile_model_runs.py index 35152476..b9eef351 100644 --- a/scripts/compile_model_runs.py +++ b/scripts/compile_model_runs.py @@ -188,13 +188,24 @@ def combine_tables(dfs, columns_same): "@volau_run24_scenAM": "emme Open Paths Network Accelerate", "@volau_run25_scenAM": "remove Cosmetic Nodes", } + +ft_map = { + 1: "Freeway", + 2: "Expressway", + 3: "Ramp", + 4: "Divided Arterial", + 5: "Undivided Arterial", + 6: "Collector", + 7: "Local", + 8: "Connector" +} # vol_pairs_to_compare = [ # ("@volau_run15_scenAM", "@volau_run22_scenAM"), # ] bases = [] comparisons = [] -func_types = [] +facility_type = [] slopes = [] r_vals = [] for base, compare in vol_pairs_to_compare: @@ -207,23 +218,24 @@ def combine_tables(dfs, columns_same): lingress = stats.linregress(x, y) bases.append(base) comparisons.append(compare) - func_types.append(i) + facility_type.append(i) slopes.append(lingress.slope) r_vals.append(lingress.rvalue) df = pd.DataFrame.from_dict( dict( - bases = bases, - comparisons = comparisons, - func_types = func_types, + base = bases, + comparison = comparisons, + facility_type = facility_type, slopes = slopes, r_vals = r_vals, ) ) -df["bases"] = df["bases"].map(rename_dict) -df["comparisons"] = df["comparisons"].map(rename_dict) +df["base"] = df["base"].map(rename_dict) +df["comparison"] = df["comparison"].map(rename_dict) df["slopes"] = df["slopes"].round(2) df["r_vals"] = df["r_vals"].round(2) +df["facility_type"] = df["facility_type"].map(ft_map) print(df.to_markdown()) #%% import pyperclip From be0ab3e250afe3cde247e2ce02ca994956f1cf01 Mon Sep 17 00:00:00 2001 From: Lachlan Perrier Date: Mon, 9 Sep 2024 20:27:47 -0400 Subject: [PATCH 6/7] minor changes --- scripts/compile_model_runs.py | 65 +++++++++++++++++++++++++++++------ 1 file changed, 54 insertions(+), 11 deletions(-) diff --git a/scripts/compile_model_runs.py b/scripts/compile_model_runs.py index b9eef351..51ecf198 100644 --- a/scripts/compile_model_runs.py +++ b/scripts/compile_model_runs.py @@ -7,7 +7,7 @@ from shapely.geometry import LineString input_dir = Path( - r"Z:\MTC\US0024934.9168\Task_3_runtime_improvements\3.2_remove_cosmetic_nodes\run_result" + r"Z:\MTC\US0024934.9168\Task_3_runtime_improvements\3.1_network_fidelity\run_result" ) output_dir = input_dir / "consolidated_3" @@ -20,7 +20,7 @@ # input[["#link_id", "geometry"]].to_file(output_dir / 
"test_geom.geojson") scenarios_to_consolidate = (12,) -runs_to_consolidate = (15, 23, 24, 25) +runs_to_consolidate = (15, 22, 26) # runs_to_consolidate = (15, 22) # %% @@ -154,6 +154,29 @@ def combine_tables(dfs, columns_same): links_wide_table["direction"] = links_wide_table["geometry"].apply( get_linestring_direction ) +#%% +# a little side quest + +rename_dict = { + 15: "run 15 old code, emme 4.6.1", + 22: "run 22 previous version of pr, open paths", + 26: "run 26 final version of pr, emme 4.6.1" +} +ft_map = { + 1.: "Freeway", + 2.: "Expressway", + 3.: "Ramp", + 4.: "Divided Arterial", + 5.: "Undivided Arterial", + 6.: "Collector", + 7.: "Local", + 8.: "Connector", + 99.: "Service Road " +} + +all_tables = pd.concat(all_links_no_none) +print(pd.crosstab(all_tables["run_number"].map(rename_dict), all_tables["@ft"]).rename(columns=ft_map).to_markdown()) + # %% ft_cols = [col for col in links_wide_table.columns if "ft_" in col] @@ -167,28 +190,34 @@ def combine_tables(dfs, columns_same): from scipy import stats for i in range(1, 7): slicer = plotting_table["ft"] == i - x = plotting_table.loc[slicer, "@volau_run15_scenAM"] - y = plotting_table.loc[slicer, "@volau_run23_scenAM"] + x = plotting_table.loc[slicer, "@volau_run22_scenAM"] + y = plotting_table.loc[slicer, "@volau_run26_scenAM"] plt.scatter(x, y, label=f'ft = {i}') - plt.xlabel("run 15") - plt.ylabel("run 22") + plt.xlabel("run 22") + plt.ylabel("run 26") print(stats.linregress(x, y)) print(i) - # plt.show() + plt.show() plt.legend() #%% vol_pairs_to_compare = [ - ("@volau_run23_scenAM", "@volau_run15_scenAM"), - ("@volau_run23_scenAM", "@volau_run24_scenAM"), - ("@volau_run23_scenAM", "@volau_run25_scenAM"), + ("@volau_run15_scenAM", "@volau_run26_scenAM"), + ("@volau_run22_scenAM", "@volau_run26_scenAM"), + # ("@volau_run23_scenAM", "@volau_run25_scenAM"), ] rename_dict = { - "@volau_run15_scenAM": "emme 4.6.1", + "@volau_run15_scenAM": "run 15 old code, emme 4.6.1", + "@volau_run22_scenAM": "run 22 previous version of pr, open paths", "@volau_run23_scenAM": "emme Open Paths", "@volau_run24_scenAM": "emme Open Paths Network Accelerate", "@volau_run25_scenAM": "remove Cosmetic Nodes", + "@volau_run26_scenAM": "run 26 final version of pr, emme 4.6.1" } +# vs 22 +# add rmse +# add total vmt for vmt +# check number fo links ft_map = { 1: "Freeway", 2: "Expressway", @@ -208,6 +237,12 @@ def combine_tables(dfs, columns_same): facility_type = [] slopes = [] r_vals = [] + +rmse = [] +base_vmt = [] +comparison_vmt = [] + + for base, compare in vol_pairs_to_compare: print(base, compare) stats_table = links_wide_table.dropna() @@ -216,11 +251,16 @@ def combine_tables(dfs, columns_same): x = stats_table.loc[slicer, base] y = stats_table.loc[slicer, compare] lingress = stats.linregress(x, y) + bases.append(base) comparisons.append(compare) facility_type.append(i) slopes.append(lingress.slope) r_vals.append(lingress.rvalue) + rmse.append(round(((x-y)**2).mean()**0.5,2)) + + base_vmt.append(x.sum()) + comparison_vmt.append(y.sum()) df = pd.DataFrame.from_dict( dict( @@ -229,6 +269,9 @@ def combine_tables(dfs, columns_same): facility_type = facility_type, slopes = slopes, r_vals = r_vals, + rmse = rmse, + base_vmt = base_vmt, + comparison_vmt = comparison_vmt, ) ) df["base"] = df["base"].map(rename_dict) From 7bc59a7faebd1e7ed485882cc2842b873c54b5b6 Mon Sep 17 00:00:00 2001 From: Lachlan Perrier Date: Thu, 5 Dec 2024 12:15:36 -0500 Subject: [PATCH 7/7] minor changes to compare model_runs --- scripts/compile_model_runs.py | 24 
+++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/scripts/compile_model_runs.py b/scripts/compile_model_runs.py index 51ecf198..4d3c74ac 100644 --- a/scripts/compile_model_runs.py +++ b/scripts/compile_model_runs.py @@ -20,7 +20,7 @@ # input[["#link_id", "geometry"]].to_file(output_dir / "test_geom.geojson") scenarios_to_consolidate = (12,) -runs_to_consolidate = (15, 22, 26) +runs_to_consolidate = (15, 22, 26, 27) # runs_to_consolidate = (15, 22) # %% @@ -160,7 +160,8 @@ def combine_tables(dfs, columns_same): rename_dict = { 15: "run 15 old code, emme 4.6.1", 22: "run 22 previous version of pr, open paths", - 26: "run 26 final version of pr, emme 4.6.1" + 26: "run 26 final version of pr, emme 4.6.1", + 27: "run 27, PR with dropping maz no drive network", } ft_map = { 1.: "Freeway", @@ -190,19 +191,24 @@ def combine_tables(dfs, columns_same): from scipy import stats for i in range(1, 7): slicer = plotting_table["ft"] == i - x = plotting_table.loc[slicer, "@volau_run22_scenAM"] - y = plotting_table.loc[slicer, "@volau_run26_scenAM"] + x = plotting_table.loc[slicer, "@volau_run15_scenAM"] + y = plotting_table.loc[slicer, "@volau_run27_scenAM"] plt.scatter(x, y, label=f'ft = {i}') - plt.xlabel("run 22") - plt.ylabel("run 26") + plt.xlabel("run 15") + plt.ylabel("run 27") print(stats.linregress(x, y)) print(i) - plt.show() + # plt.show() plt.legend() #%% + +no_vol_links = links_wide_table[(links_wide_table["@volau_run27_scenAM"] < 1) & (links_wide_table["ft"] == 1)] +slicer = (no_vol_links["@volau_run15_scenAM"] > 1) +no_vol_links[slicer] +#%% vol_pairs_to_compare = [ - ("@volau_run15_scenAM", "@volau_run26_scenAM"), - ("@volau_run22_scenAM", "@volau_run26_scenAM"), + ("@volau_run15_scenAM", "@volau_run27_scenAM"), + # ("@volau_run22_scenAM", "@volau_run26_scenAM"), # ("@volau_run23_scenAM", "@volau_run25_scenAM"), ] rename_dict = {
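# --------------------------------------------------------------------------
# Illustrative sketch (not part of the patch above): the comparison that the
# compile_model_runs.py changes in patches 3-7 keep iterating on boils down to
# fitting one regression per facility type ("ft") between two runs' AM link
# volumes and reporting slope, r-value, RMSE, and total volume. The helper
# name `compare_runs` is hypothetical, and the example column names simply
# follow the "@volau_run<N>_scenAM" pattern used in the diffs; assume a
# links_wide_table with an "ft" column as built in the script.
import pandas as pd
from scipy import stats


def compare_runs(links: pd.DataFrame, base_col: str, compare_col: str) -> pd.DataFrame:
    """Return one row of fit statistics per facility type for two volume columns."""
    rows = []
    for ft, group in links.dropna(subset=[base_col, compare_col]).groupby("ft"):
        x = group[base_col]
        y = group[compare_col]
        fit = stats.linregress(x, y)
        rows.append(
            {
                "facility_type": ft,
                "slope": round(fit.slope, 2),
                "r_val": round(fit.rvalue, 2),
                "rmse": round(((x - y) ** 2).mean() ** 0.5, 2),
                "base_vol": x.sum(),
                "comparison_vol": y.sum(),
            }
        )
    return pd.DataFrame(rows)


# Example usage with assumed column names:
# print(compare_runs(links_wide_table, "@volau_run15_scenAM", "@volau_run27_scenAM").to_markdown())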